From 7a5582dc662c1135fec70f3f7ae832a7b509f0ff Mon Sep 17 00:00:00 2001 From: "github-actions[bot]" Date: Thu, 16 Jan 2025 16:31:13 +0000 Subject: [PATCH] Deployed 2cc3c5f to 2.0.6 in docs with MkDocs 1.6.1 and mike 2.1.3 --- docs/2.0.6/404.html | 474 + docs/2.0.6/api/index.html | 30754 ++++++++++++++++ docs/2.0.6/assets/_mkdocstrings.css | 119 + docs/2.0.6/assets/images/favicon.png | Bin 0 -> 1870 bytes .../assets/javascripts/bundle.525ec568.min.js | 16 + .../javascripts/bundle.525ec568.min.js.map | 7 + .../javascripts/lunr/min/lunr.ar.min.js | 1 + .../javascripts/lunr/min/lunr.da.min.js | 18 + .../javascripts/lunr/min/lunr.de.min.js | 18 + .../javascripts/lunr/min/lunr.du.min.js | 18 + .../javascripts/lunr/min/lunr.el.min.js | 1 + .../javascripts/lunr/min/lunr.es.min.js | 18 + .../javascripts/lunr/min/lunr.fi.min.js | 18 + .../javascripts/lunr/min/lunr.fr.min.js | 18 + .../javascripts/lunr/min/lunr.he.min.js | 1 + .../javascripts/lunr/min/lunr.hi.min.js | 1 + .../javascripts/lunr/min/lunr.hu.min.js | 18 + .../javascripts/lunr/min/lunr.hy.min.js | 1 + .../javascripts/lunr/min/lunr.it.min.js | 18 + .../javascripts/lunr/min/lunr.ja.min.js | 1 + .../javascripts/lunr/min/lunr.jp.min.js | 1 + .../javascripts/lunr/min/lunr.kn.min.js | 1 + .../javascripts/lunr/min/lunr.ko.min.js | 1 + .../javascripts/lunr/min/lunr.multi.min.js | 1 + .../javascripts/lunr/min/lunr.nl.min.js | 18 + .../javascripts/lunr/min/lunr.no.min.js | 18 + .../javascripts/lunr/min/lunr.pt.min.js | 18 + .../javascripts/lunr/min/lunr.ro.min.js | 18 + .../javascripts/lunr/min/lunr.ru.min.js | 18 + .../javascripts/lunr/min/lunr.sa.min.js | 1 + .../lunr/min/lunr.stemmer.support.min.js | 1 + .../javascripts/lunr/min/lunr.sv.min.js | 18 + .../javascripts/lunr/min/lunr.ta.min.js | 1 + .../javascripts/lunr/min/lunr.te.min.js | 1 + .../javascripts/lunr/min/lunr.th.min.js | 1 + .../javascripts/lunr/min/lunr.tr.min.js | 18 + .../javascripts/lunr/min/lunr.vi.min.js | 1 + .../javascripts/lunr/min/lunr.zh.min.js | 1 + 
docs/2.0.6/assets/javascripts/lunr/tinyseg.js | 206 + docs/2.0.6/assets/javascripts/lunr/wordcut.js | 6708 ++++ .../workers/search.6ce7567c.min.js | 42 + .../workers/search.6ce7567c.min.js.map | 7 + .../assets/stylesheets/main.8c3ca2c6.min.css | 1 + .../stylesheets/main.8c3ca2c6.min.css.map | 1 + .../stylesheets/palette.06af60db.min.css | 1 + .../stylesheets/palette.06af60db.min.css.map | 1 + docs/2.0.6/changelog/index.html | 1655 + docs/2.0.6/contributing/index.html | 715 + docs/2.0.6/get_started/index.html | 2447 ++ docs/2.0.6/index.html | 608 + docs/2.0.6/installation/index.html | 635 + docs/2.0.6/metadata_creation/index.html | 2469 ++ docs/2.0.6/objects.inv | Bin 0 -> 768 bytes docs/2.0.6/search/search_index.json | 1 + docs/2.0.6/sitemap.xml | 35 + docs/2.0.6/sitemap.xml.gz | Bin 0 -> 267 bytes docs/2.0.6/usage/index.html | 621 + docs/latest | 2 +- docs/versions.json | 13 +- 59 files changed, 47820 insertions(+), 5 deletions(-) create mode 100644 docs/2.0.6/404.html create mode 100644 docs/2.0.6/api/index.html create mode 100644 docs/2.0.6/assets/_mkdocstrings.css create mode 100644 docs/2.0.6/assets/images/favicon.png create mode 100644 docs/2.0.6/assets/javascripts/bundle.525ec568.min.js create mode 100644 docs/2.0.6/assets/javascripts/bundle.525ec568.min.js.map create mode 100644 docs/2.0.6/assets/javascripts/lunr/min/lunr.ar.min.js create mode 100644 docs/2.0.6/assets/javascripts/lunr/min/lunr.da.min.js create mode 100644 docs/2.0.6/assets/javascripts/lunr/min/lunr.de.min.js create mode 100644 docs/2.0.6/assets/javascripts/lunr/min/lunr.du.min.js create mode 100644 docs/2.0.6/assets/javascripts/lunr/min/lunr.el.min.js create mode 100644 docs/2.0.6/assets/javascripts/lunr/min/lunr.es.min.js create mode 100644 docs/2.0.6/assets/javascripts/lunr/min/lunr.fi.min.js create mode 100644 docs/2.0.6/assets/javascripts/lunr/min/lunr.fr.min.js create mode 100644 docs/2.0.6/assets/javascripts/lunr/min/lunr.he.min.js create mode 100644 
docs/2.0.6/assets/javascripts/lunr/min/lunr.hi.min.js create mode 100644 docs/2.0.6/assets/javascripts/lunr/min/lunr.hu.min.js create mode 100644 docs/2.0.6/assets/javascripts/lunr/min/lunr.hy.min.js create mode 100644 docs/2.0.6/assets/javascripts/lunr/min/lunr.it.min.js create mode 100644 docs/2.0.6/assets/javascripts/lunr/min/lunr.ja.min.js create mode 100644 docs/2.0.6/assets/javascripts/lunr/min/lunr.jp.min.js create mode 100644 docs/2.0.6/assets/javascripts/lunr/min/lunr.kn.min.js create mode 100644 docs/2.0.6/assets/javascripts/lunr/min/lunr.ko.min.js create mode 100644 docs/2.0.6/assets/javascripts/lunr/min/lunr.multi.min.js create mode 100644 docs/2.0.6/assets/javascripts/lunr/min/lunr.nl.min.js create mode 100644 docs/2.0.6/assets/javascripts/lunr/min/lunr.no.min.js create mode 100644 docs/2.0.6/assets/javascripts/lunr/min/lunr.pt.min.js create mode 100644 docs/2.0.6/assets/javascripts/lunr/min/lunr.ro.min.js create mode 100644 docs/2.0.6/assets/javascripts/lunr/min/lunr.ru.min.js create mode 100644 docs/2.0.6/assets/javascripts/lunr/min/lunr.sa.min.js create mode 100644 docs/2.0.6/assets/javascripts/lunr/min/lunr.stemmer.support.min.js create mode 100644 docs/2.0.6/assets/javascripts/lunr/min/lunr.sv.min.js create mode 100644 docs/2.0.6/assets/javascripts/lunr/min/lunr.ta.min.js create mode 100644 docs/2.0.6/assets/javascripts/lunr/min/lunr.te.min.js create mode 100644 docs/2.0.6/assets/javascripts/lunr/min/lunr.th.min.js create mode 100644 docs/2.0.6/assets/javascripts/lunr/min/lunr.tr.min.js create mode 100644 docs/2.0.6/assets/javascripts/lunr/min/lunr.vi.min.js create mode 100644 docs/2.0.6/assets/javascripts/lunr/min/lunr.zh.min.js create mode 100644 docs/2.0.6/assets/javascripts/lunr/tinyseg.js create mode 100644 docs/2.0.6/assets/javascripts/lunr/wordcut.js create mode 100644 docs/2.0.6/assets/javascripts/workers/search.6ce7567c.min.js create mode 100644 docs/2.0.6/assets/javascripts/workers/search.6ce7567c.min.js.map create mode 100644 
docs/2.0.6/assets/stylesheets/main.8c3ca2c6.min.css create mode 100644 docs/2.0.6/assets/stylesheets/main.8c3ca2c6.min.css.map create mode 100644 docs/2.0.6/assets/stylesheets/palette.06af60db.min.css create mode 100644 docs/2.0.6/assets/stylesheets/palette.06af60db.min.css.map create mode 100644 docs/2.0.6/changelog/index.html create mode 100644 docs/2.0.6/contributing/index.html create mode 100644 docs/2.0.6/get_started/index.html create mode 100644 docs/2.0.6/index.html create mode 100644 docs/2.0.6/installation/index.html create mode 100644 docs/2.0.6/metadata_creation/index.html create mode 100644 docs/2.0.6/objects.inv create mode 100644 docs/2.0.6/search/search_index.json create mode 100644 docs/2.0.6/sitemap.xml create mode 100644 docs/2.0.6/sitemap.xml.gz create mode 100644 docs/2.0.6/usage/index.html diff --git a/docs/2.0.6/404.html b/docs/2.0.6/404.html new file mode 100644 index 0000000..09a46be --- /dev/null +++ b/docs/2.0.6/404.html @@ -0,0 +1,474 @@ + + + + + + + + + + + + + + + + + + + fusion + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + +
+ +
+
+ +
+ + + + + + + + +
+ + + + +
+ +
+ + + + +
+
+ + + +
+
+
+ + + + + +
+
+
+ + + +
+
+
+ + + +
+
+
+ + + +
+
+ +

404 - Not found

+ +
+
+ + + +
+ +
+ + + +
+
+
+
+ + + + + + + + + + \ No newline at end of file diff --git a/docs/2.0.6/api/index.html b/docs/2.0.6/api/index.html new file mode 100644 index 0000000..31252e8 --- /dev/null +++ b/docs/2.0.6/api/index.html @@ -0,0 +1,30754 @@ + + + + + + + + + + + + + + + + + + + + + + + + + Modules - fusion + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + +
+ + + + Skip to content + + +
+
+ +
+ + + + + + + + +
+ + + + +
+ +
+ + + + +
+
+ + + +
+
+
+ + + + + +
+
+
+ + + +
+
+
+ + + +
+
+
+ + + +
+
+ + + + + + + +

Modules

+ +
+ + + + +
+ +

Main Fusion module.

+ + + +
+ + + + + + + + +
+ + + +

+ Fusion + + +

+ + +
+ + +

Core Fusion class for API access.

+ +
+ Source code in py_src/fusion/fusion.py +
  64
+  65
+  66
+  67
+  68
+  69
+  70
+  71
+  72
+  73
+  74
+  75
+  76
+  77
+  78
+  79
+  80
+  81
+  82
+  83
+  84
+  85
+  86
+  87
+  88
+  89
+  90
+  91
+  92
+  93
+  94
+  95
+  96
+  97
+  98
+  99
+ 100
+ 101
+ 102
+ 103
+ 104
+ 105
+ 106
+ 107
+ 108
+ 109
+ 110
+ 111
+ 112
+ 113
+ 114
+ 115
+ 116
+ 117
+ 118
+ 119
+ 120
+ 121
+ 122
+ 123
+ 124
+ 125
+ 126
+ 127
+ 128
+ 129
+ 130
+ 131
+ 132
+ 133
+ 134
+ 135
+ 136
+ 137
+ 138
+ 139
+ 140
+ 141
+ 142
+ 143
+ 144
+ 145
+ 146
+ 147
+ 148
+ 149
+ 150
+ 151
+ 152
+ 153
+ 154
+ 155
+ 156
+ 157
+ 158
+ 159
+ 160
+ 161
+ 162
+ 163
+ 164
+ 165
+ 166
+ 167
+ 168
+ 169
+ 170
+ 171
+ 172
+ 173
+ 174
+ 175
+ 176
+ 177
+ 178
+ 179
+ 180
+ 181
+ 182
+ 183
+ 184
+ 185
+ 186
+ 187
+ 188
+ 189
+ 190
+ 191
+ 192
+ 193
+ 194
+ 195
+ 196
+ 197
+ 198
+ 199
+ 200
+ 201
+ 202
+ 203
+ 204
+ 205
+ 206
+ 207
+ 208
+ 209
+ 210
+ 211
+ 212
+ 213
+ 214
+ 215
+ 216
+ 217
+ 218
+ 219
+ 220
+ 221
+ 222
+ 223
+ 224
+ 225
+ 226
+ 227
+ 228
+ 229
+ 230
+ 231
+ 232
+ 233
+ 234
+ 235
+ 236
+ 237
+ 238
+ 239
+ 240
+ 241
+ 242
+ 243
+ 244
+ 245
+ 246
+ 247
+ 248
+ 249
+ 250
+ 251
+ 252
+ 253
+ 254
+ 255
+ 256
+ 257
+ 258
+ 259
+ 260
+ 261
+ 262
+ 263
+ 264
+ 265
+ 266
+ 267
+ 268
+ 269
+ 270
+ 271
+ 272
+ 273
+ 274
+ 275
+ 276
+ 277
+ 278
+ 279
+ 280
+ 281
+ 282
+ 283
+ 284
+ 285
+ 286
+ 287
+ 288
+ 289
+ 290
+ 291
+ 292
+ 293
+ 294
+ 295
+ 296
+ 297
+ 298
+ 299
+ 300
+ 301
+ 302
+ 303
+ 304
+ 305
+ 306
+ 307
+ 308
+ 309
+ 310
+ 311
+ 312
+ 313
+ 314
+ 315
+ 316
+ 317
+ 318
+ 319
+ 320
+ 321
+ 322
+ 323
+ 324
+ 325
+ 326
+ 327
+ 328
+ 329
+ 330
+ 331
+ 332
+ 333
+ 334
+ 335
+ 336
+ 337
+ 338
+ 339
+ 340
+ 341
+ 342
+ 343
+ 344
+ 345
+ 346
+ 347
+ 348
+ 349
+ 350
+ 351
+ 352
+ 353
+ 354
+ 355
+ 356
+ 357
+ 358
+ 359
+ 360
+ 361
+ 362
+ 363
+ 364
+ 365
+ 366
+ 367
+ 368
+ 369
+ 370
+ 371
+ 372
+ 373
+ 374
+ 375
+ 376
+ 377
+ 378
+ 379
+ 380
+ 381
+ 382
+ 383
+ 384
+ 385
+ 386
+ 387
+ 388
+ 389
+ 390
+ 391
+ 392
+ 393
+ 394
+ 395
+ 396
+ 397
+ 398
+ 399
+ 400
+ 401
+ 402
+ 403
+ 404
+ 405
+ 406
+ 407
+ 408
+ 409
+ 410
+ 411
+ 412
+ 413
+ 414
+ 415
+ 416
+ 417
+ 418
+ 419
+ 420
+ 421
+ 422
+ 423
+ 424
+ 425
+ 426
+ 427
+ 428
+ 429
+ 430
+ 431
+ 432
+ 433
+ 434
+ 435
+ 436
+ 437
+ 438
+ 439
+ 440
+ 441
+ 442
+ 443
+ 444
+ 445
+ 446
+ 447
+ 448
+ 449
+ 450
+ 451
+ 452
+ 453
+ 454
+ 455
+ 456
+ 457
+ 458
+ 459
+ 460
+ 461
+ 462
+ 463
+ 464
+ 465
+ 466
+ 467
+ 468
+ 469
+ 470
+ 471
+ 472
+ 473
+ 474
+ 475
+ 476
+ 477
+ 478
+ 479
+ 480
+ 481
+ 482
+ 483
+ 484
+ 485
+ 486
+ 487
+ 488
+ 489
+ 490
+ 491
+ 492
+ 493
+ 494
+ 495
+ 496
+ 497
+ 498
+ 499
+ 500
+ 501
+ 502
+ 503
+ 504
+ 505
+ 506
+ 507
+ 508
+ 509
+ 510
+ 511
+ 512
+ 513
+ 514
+ 515
+ 516
+ 517
+ 518
+ 519
+ 520
+ 521
+ 522
+ 523
+ 524
+ 525
+ 526
+ 527
+ 528
+ 529
+ 530
+ 531
+ 532
+ 533
+ 534
+ 535
+ 536
+ 537
+ 538
+ 539
+ 540
+ 541
+ 542
+ 543
+ 544
+ 545
+ 546
+ 547
+ 548
+ 549
+ 550
+ 551
+ 552
+ 553
+ 554
+ 555
+ 556
+ 557
+ 558
+ 559
+ 560
+ 561
+ 562
+ 563
+ 564
+ 565
+ 566
+ 567
+ 568
+ 569
+ 570
+ 571
+ 572
+ 573
+ 574
+ 575
+ 576
+ 577
+ 578
+ 579
+ 580
+ 581
+ 582
+ 583
+ 584
+ 585
+ 586
+ 587
+ 588
+ 589
+ 590
+ 591
+ 592
+ 593
+ 594
+ 595
+ 596
+ 597
+ 598
+ 599
+ 600
+ 601
+ 602
+ 603
+ 604
+ 605
+ 606
+ 607
+ 608
+ 609
+ 610
+ 611
+ 612
+ 613
+ 614
+ 615
+ 616
+ 617
+ 618
+ 619
+ 620
+ 621
+ 622
+ 623
+ 624
+ 625
+ 626
+ 627
+ 628
+ 629
+ 630
+ 631
+ 632
+ 633
+ 634
+ 635
+ 636
+ 637
+ 638
+ 639
+ 640
+ 641
+ 642
+ 643
+ 644
+ 645
+ 646
+ 647
+ 648
+ 649
+ 650
+ 651
+ 652
+ 653
+ 654
+ 655
+ 656
+ 657
+ 658
+ 659
+ 660
+ 661
+ 662
+ 663
+ 664
+ 665
+ 666
+ 667
+ 668
+ 669
+ 670
+ 671
+ 672
+ 673
+ 674
+ 675
+ 676
+ 677
+ 678
+ 679
+ 680
+ 681
+ 682
+ 683
+ 684
+ 685
+ 686
+ 687
+ 688
+ 689
+ 690
+ 691
+ 692
+ 693
+ 694
+ 695
+ 696
+ 697
+ 698
+ 699
+ 700
+ 701
+ 702
+ 703
+ 704
+ 705
+ 706
+ 707
+ 708
+ 709
+ 710
+ 711
+ 712
+ 713
+ 714
+ 715
+ 716
+ 717
+ 718
+ 719
+ 720
+ 721
+ 722
+ 723
+ 724
+ 725
+ 726
+ 727
+ 728
+ 729
+ 730
+ 731
+ 732
+ 733
+ 734
+ 735
+ 736
+ 737
+ 738
+ 739
+ 740
+ 741
+ 742
+ 743
+ 744
+ 745
+ 746
+ 747
+ 748
+ 749
+ 750
+ 751
+ 752
+ 753
+ 754
+ 755
+ 756
+ 757
+ 758
+ 759
+ 760
+ 761
+ 762
+ 763
+ 764
+ 765
+ 766
+ 767
+ 768
+ 769
+ 770
+ 771
+ 772
+ 773
+ 774
+ 775
+ 776
+ 777
+ 778
+ 779
+ 780
+ 781
+ 782
+ 783
+ 784
+ 785
+ 786
+ 787
+ 788
+ 789
+ 790
+ 791
+ 792
+ 793
+ 794
+ 795
+ 796
+ 797
+ 798
+ 799
+ 800
+ 801
+ 802
+ 803
+ 804
+ 805
+ 806
+ 807
+ 808
+ 809
+ 810
+ 811
+ 812
+ 813
+ 814
+ 815
+ 816
+ 817
+ 818
+ 819
+ 820
+ 821
+ 822
+ 823
+ 824
+ 825
+ 826
+ 827
+ 828
+ 829
+ 830
+ 831
+ 832
+ 833
+ 834
+ 835
+ 836
+ 837
+ 838
+ 839
+ 840
+ 841
+ 842
+ 843
+ 844
+ 845
+ 846
+ 847
+ 848
+ 849
+ 850
+ 851
+ 852
+ 853
+ 854
+ 855
+ 856
+ 857
+ 858
+ 859
+ 860
+ 861
+ 862
+ 863
+ 864
+ 865
+ 866
+ 867
+ 868
+ 869
+ 870
+ 871
+ 872
+ 873
+ 874
+ 875
+ 876
+ 877
+ 878
+ 879
+ 880
+ 881
+ 882
+ 883
+ 884
+ 885
+ 886
+ 887
+ 888
+ 889
+ 890
+ 891
+ 892
+ 893
+ 894
+ 895
+ 896
+ 897
+ 898
+ 899
+ 900
+ 901
+ 902
+ 903
+ 904
+ 905
+ 906
+ 907
+ 908
+ 909
+ 910
+ 911
+ 912
+ 913
+ 914
+ 915
+ 916
+ 917
+ 918
+ 919
+ 920
+ 921
+ 922
+ 923
+ 924
+ 925
+ 926
+ 927
+ 928
+ 929
+ 930
+ 931
+ 932
+ 933
+ 934
+ 935
+ 936
+ 937
+ 938
+ 939
+ 940
+ 941
+ 942
+ 943
+ 944
+ 945
+ 946
+ 947
+ 948
+ 949
+ 950
+ 951
+ 952
+ 953
+ 954
+ 955
+ 956
+ 957
+ 958
+ 959
+ 960
+ 961
+ 962
+ 963
+ 964
+ 965
+ 966
+ 967
+ 968
+ 969
+ 970
+ 971
+ 972
+ 973
+ 974
+ 975
+ 976
+ 977
+ 978
+ 979
+ 980
+ 981
+ 982
+ 983
+ 984
+ 985
+ 986
+ 987
+ 988
+ 989
+ 990
+ 991
+ 992
+ 993
+ 994
+ 995
+ 996
+ 997
+ 998
+ 999
+1000
+1001
+1002
+1003
+1004
+1005
+1006
+1007
+1008
+1009
+1010
+1011
+1012
+1013
+1014
+1015
+1016
+1017
+1018
+1019
+1020
+1021
+1022
+1023
+1024
+1025
+1026
+1027
+1028
+1029
+1030
+1031
+1032
+1033
+1034
+1035
+1036
+1037
+1038
+1039
+1040
+1041
+1042
+1043
+1044
+1045
+1046
+1047
+1048
+1049
+1050
+1051
+1052
+1053
+1054
+1055
+1056
+1057
+1058
+1059
+1060
+1061
+1062
+1063
+1064
+1065
+1066
+1067
+1068
+1069
+1070
+1071
+1072
+1073
+1074
+1075
+1076
+1077
+1078
+1079
+1080
+1081
+1082
+1083
+1084
+1085
+1086
+1087
+1088
+1089
+1090
+1091
+1092
+1093
+1094
+1095
+1096
+1097
+1098
+1099
+1100
+1101
+1102
+1103
+1104
+1105
+1106
+1107
+1108
+1109
+1110
+1111
+1112
+1113
+1114
+1115
+1116
+1117
+1118
+1119
+1120
+1121
+1122
+1123
+1124
+1125
+1126
+1127
+1128
+1129
+1130
+1131
+1132
+1133
+1134
+1135
+1136
+1137
+1138
+1139
+1140
+1141
+1142
+1143
+1144
+1145
+1146
+1147
+1148
+1149
+1150
+1151
+1152
+1153
+1154
+1155
+1156
+1157
+1158
+1159
+1160
+1161
+1162
+1163
+1164
+1165
+1166
+1167
+1168
+1169
+1170
+1171
+1172
+1173
+1174
+1175
+1176
+1177
+1178
+1179
+1180
+1181
+1182
+1183
+1184
+1185
+1186
+1187
+1188
+1189
+1190
+1191
+1192
+1193
+1194
+1195
+1196
+1197
+1198
+1199
+1200
+1201
+1202
+1203
+1204
+1205
+1206
+1207
+1208
+1209
+1210
+1211
+1212
+1213
+1214
+1215
+1216
+1217
+1218
+1219
+1220
+1221
+1222
+1223
+1224
+1225
+1226
+1227
+1228
+1229
+1230
+1231
+1232
+1233
+1234
+1235
+1236
+1237
+1238
+1239
+1240
+1241
+1242
+1243
+1244
+1245
+1246
+1247
+1248
+1249
+1250
+1251
+1252
+1253
+1254
+1255
+1256
+1257
+1258
+1259
+1260
+1261
+1262
+1263
+1264
+1265
+1266
+1267
+1268
+1269
+1270
+1271
+1272
+1273
+1274
+1275
+1276
+1277
+1278
+1279
+1280
+1281
+1282
+1283
+1284
+1285
+1286
+1287
+1288
+1289
+1290
+1291
+1292
+1293
+1294
+1295
+1296
+1297
+1298
+1299
+1300
+1301
+1302
+1303
+1304
+1305
+1306
+1307
+1308
+1309
+1310
+1311
+1312
+1313
+1314
+1315
+1316
+1317
+1318
+1319
+1320
+1321
+1322
+1323
+1324
+1325
+1326
+1327
+1328
+1329
+1330
+1331
+1332
+1333
+1334
+1335
+1336
+1337
+1338
+1339
+1340
+1341
+1342
+1343
+1344
+1345
+1346
+1347
+1348
+1349
+1350
+1351
+1352
+1353
+1354
+1355
+1356
+1357
+1358
+1359
+1360
+1361
+1362
+1363
+1364
+1365
+1366
+1367
+1368
+1369
+1370
+1371
+1372
+1373
+1374
+1375
+1376
+1377
+1378
+1379
+1380
+1381
+1382
+1383
+1384
+1385
+1386
+1387
+1388
+1389
+1390
+1391
+1392
+1393
+1394
+1395
+1396
+1397
+1398
+1399
+1400
+1401
+1402
+1403
+1404
+1405
+1406
+1407
+1408
+1409
+1410
+1411
+1412
+1413
+1414
+1415
+1416
+1417
+1418
+1419
+1420
+1421
+1422
+1423
+1424
+1425
+1426
+1427
+1428
+1429
+1430
+1431
+1432
+1433
+1434
+1435
+1436
+1437
+1438
+1439
+1440
+1441
+1442
+1443
+1444
+1445
+1446
+1447
+1448
+1449
+1450
+1451
+1452
+1453
+1454
+1455
+1456
+1457
+1458
+1459
+1460
+1461
+1462
+1463
+1464
+1465
+1466
+1467
+1468
+1469
+1470
+1471
+1472
+1473
+1474
+1475
+1476
+1477
+1478
+1479
+1480
+1481
+1482
+1483
+1484
+1485
+1486
+1487
+1488
+1489
+1490
+1491
+1492
+1493
+1494
+1495
+1496
+1497
+1498
+1499
+1500
+1501
+1502
+1503
+1504
+1505
+1506
+1507
+1508
+1509
+1510
+1511
+1512
+1513
+1514
+1515
+1516
+1517
+1518
+1519
+1520
+1521
+1522
+1523
+1524
+1525
+1526
+1527
+1528
+1529
+1530
+1531
+1532
+1533
+1534
+1535
+1536
+1537
+1538
+1539
+1540
+1541
+1542
+1543
+1544
+1545
+1546
+1547
+1548
+1549
+1550
+1551
+1552
+1553
+1554
+1555
+1556
+1557
+1558
+1559
+1560
+1561
+1562
+1563
+1564
+1565
+1566
+1567
+1568
+1569
+1570
+1571
+1572
+1573
+1574
+1575
+1576
+1577
+1578
+1579
+1580
+1581
+1582
+1583
+1584
+1585
+1586
+1587
+1588
+1589
+1590
+1591
+1592
+1593
+1594
+1595
+1596
+1597
+1598
+1599
+1600
+1601
+1602
+1603
+1604
+1605
+1606
+1607
+1608
+1609
+1610
+1611
+1612
+1613
+1614
+1615
+1616
+1617
+1618
+1619
+1620
+1621
+1622
+1623
+1624
+1625
+1626
+1627
+1628
+1629
+1630
+1631
+1632
+1633
+1634
+1635
+1636
+1637
+1638
+1639
+1640
+1641
+1642
+1643
+1644
+1645
+1646
+1647
+1648
+1649
+1650
+1651
+1652
+1653
+1654
+1655
+1656
+1657
+1658
+1659
+1660
+1661
+1662
+1663
+1664
+1665
+1666
+1667
+1668
+1669
+1670
+1671
+1672
+1673
+1674
+1675
+1676
+1677
+1678
+1679
+1680
+1681
+1682
+1683
+1684
+1685
+1686
+1687
+1688
+1689
+1690
+1691
+1692
+1693
+1694
+1695
+1696
+1697
+1698
+1699
+1700
+1701
+1702
+1703
+1704
+1705
+1706
+1707
+1708
+1709
+1710
+1711
+1712
+1713
+1714
+1715
+1716
+1717
+1718
+1719
+1720
+1721
+1722
+1723
+1724
+1725
+1726
+1727
+1728
+1729
+1730
+1731
+1732
+1733
+1734
+1735
+1736
+1737
+1738
+1739
+1740
+1741
+1742
+1743
+1744
+1745
+1746
+1747
+1748
+1749
+1750
+1751
+1752
+1753
+1754
+1755
+1756
+1757
+1758
+1759
+1760
+1761
+1762
+1763
+1764
+1765
+1766
+1767
+1768
+1769
+1770
+1771
+1772
+1773
+1774
+1775
+1776
+1777
+1778
+1779
+1780
+1781
+1782
+1783
+1784
+1785
+1786
+1787
+1788
+1789
+1790
+1791
+1792
+1793
+1794
+1795
+1796
+1797
+1798
+1799
+1800
+1801
+1802
+1803
+1804
+1805
+1806
+1807
+1808
+1809
+1810
+1811
+1812
+1813
+1814
+1815
+1816
+1817
+1818
+1819
+1820
+1821
+1822
+1823
+1824
+1825
+1826
+1827
+1828
+1829
+1830
+1831
+1832
+1833
+1834
+1835
+1836
+1837
+1838
+1839
+1840
+1841
+1842
+1843
+1844
+1845
+1846
+1847
+1848
+1849
+1850
+1851
+1852
+1853
+1854
+1855
+1856
+1857
+1858
+1859
+1860
+1861
+1862
+1863
+1864
+1865
+1866
+1867
+1868
+1869
+1870
+1871
+1872
+1873
+1874
+1875
+1876
+1877
+1878
+1879
+1880
+1881
+1882
+1883
+1884
+1885
+1886
+1887
+1888
+1889
+1890
+1891
+1892
+1893
+1894
+1895
+1896
+1897
+1898
+1899
+1900
+1901
+1902
+1903
+1904
+1905
+1906
+1907
+1908
+1909
+1910
+1911
+1912
+1913
+1914
+1915
+1916
+1917
+1918
+1919
+1920
+1921
+1922
+1923
+1924
+1925
+1926
+1927
+1928
+1929
+1930
+1931
+1932
+1933
+1934
+1935
+1936
+1937
+1938
+1939
+1940
+1941
+1942
+1943
+1944
+1945
+1946
+1947
+1948
+1949
+1950
+1951
+1952
+1953
+1954
+1955
+1956
+1957
+1958
+1959
+1960
+1961
+1962
+1963
+1964
+1965
+1966
+1967
+1968
+1969
+1970
+1971
+1972
+1973
+1974
+1975
+1976
+1977
+1978
+1979
+1980
+1981
+1982
+1983
+1984
+1985
+1986
+1987
+1988
+1989
+1990
+1991
+1992
+1993
+1994
+1995
+1996
+1997
+1998
+1999
+2000
+2001
+2002
+2003
+2004
+2005
+2006
+2007
+2008
+2009
+2010
+2011
+2012
+2013
+2014
+2015
+2016
+2017
+2018
+2019
+2020
+2021
+2022
+2023
+2024
+2025
+2026
+2027
+2028
+2029
+2030
+2031
+2032
+2033
+2034
+2035
+2036
+2037
+2038
+2039
+2040
+2041
+2042
+2043
+2044
+2045
+2046
+2047
+2048
+2049
+2050
+2051
+2052
+2053
+2054
+2055
+2056
+2057
+2058
+2059
+2060
+2061
+2062
+2063
+2064
+2065
+2066
+2067
+2068
+2069
+2070
+2071
+2072
+2073
+2074
+2075
+2076
+2077
+2078
+2079
+2080
+2081
+2082
+2083
+2084
+2085
+2086
+2087
+2088
+2089
+2090
+2091
+2092
+2093
+2094
+2095
+2096
+2097
+2098
+2099
+2100
+2101
+2102
+2103
+2104
+2105
+2106
+2107
+2108
+2109
+2110
+2111
+2112
+2113
+2114
+2115
+2116
+2117
+2118
+2119
+2120
+2121
+2122
+2123
+2124
+2125
+2126
+2127
+2128
+2129
+2130
+2131
+2132
+2133
+2134
+2135
+2136
+2137
+2138
+2139
+2140
+2141
+2142
+2143
+2144
+2145
+2146
+2147
+2148
+2149
+2150
+2151
+2152
+2153
+2154
+2155
+2156
+2157
+2158
+2159
+2160
+2161
+2162
+2163
+2164
+2165
+2166
+2167
+2168
+2169
+2170
+2171
+2172
+2173
+2174
+2175
+2176
+2177
+2178
+2179
+2180
+2181
+2182
+2183
+2184
+2185
+2186
+2187
+2188
+2189
+2190
+2191
+2192
+2193
+2194
+2195
+2196
+2197
+2198
+2199
+2200
+2201
+2202
+2203
+2204
+2205
+2206
+2207
+2208
+2209
+2210
+2211
+2212
+2213
+2214
+2215
+2216
+2217
+2218
+2219
+2220
+2221
+2222
+2223
+2224
+2225
+2226
+2227
+2228
+2229
+2230
+2231
+2232
+2233
+2234
+2235
+2236
+2237
+2238
+2239
+2240
+2241
+2242
+2243
+2244
+2245
+2246
+2247
+2248
+2249
+2250
+2251
+2252
+2253
+2254
+2255
+2256
+2257
+2258
+2259
+2260
+2261
+2262
+2263
+2264
+2265
+2266
+2267
+2268
+2269
+2270
+2271
+2272
+2273
+2274
+2275
+2276
+2277
+2278
+2279
+2280
+2281
+2282
+2283
+2284
+2285
+2286
+2287
+2288
+2289
+2290
+2291
+2292
+2293
+2294
+2295
+2296
+2297
+2298
+2299
+2300
+2301
+2302
+2303
+2304
+2305
+2306
+2307
+2308
+2309
+2310
+2311
+2312
+2313
+2314
+2315
+2316
+2317
+2318
+2319
+2320
+2321
+2322
+2323
+2324
+2325
+2326
+2327
+2328
+2329
+2330
+2331
+2332
+2333
+2334
+2335
+2336
+2337
+2338
+2339
+2340
+2341
+2342
+2343
+2344
+2345
+2346
+2347
+2348
+2349
+2350
+2351
+2352
+2353
+2354
+2355
+2356
+2357
+2358
+2359
+2360
+2361
+2362
+2363
+2364
+2365
+2366
+2367
+2368
+2369
+2370
+2371
+2372
+2373
+2374
+2375
+2376
+2377
+2378
+2379
+2380
+2381
+2382
+2383
+2384
+2385
+2386
+2387
+2388
+2389
+2390
+2391
+2392
+2393
+2394
+2395
+2396
+2397
+2398
+2399
+2400
+2401
+2402
+2403
+2404
+2405
+2406
+2407
+2408
+2409
+2410
+2411
+2412
+2413
+2414
+2415
+2416
+2417
+2418
+2419
+2420
+2421
+2422
+2423
+2424
+2425
+2426
+2427
+2428
+2429
+2430
+2431
+2432
+2433
+2434
+2435
+2436
+2437
class Fusion:
+    """Core Fusion class for API access."""
+
+    @staticmethod
+    def _call_for_dataframe(url: str, session: requests.Session) -> pd.DataFrame:
+        """Private function that calls an API endpoint and returns the data as a pandas dataframe.
+
+        Args:
+            url (Union[FusionCredentials, Union[str, dict]): URL for an API endpoint with valid parameters.
+            session (requests.Session): Specify a proxy if required to access the authentication server. Defaults to {}.
+
+        Returns:
+            pandas.DataFrame: a dataframe containing the requested data.
+        """
+        response = session.get(url)
+        response.raise_for_status()
+        table = response.json()["resources"]
+        ret_df = pd.DataFrame(table).reset_index(drop=True)
+        return ret_df
+
+    @staticmethod
+    def _call_for_bytes_object(url: str, session: requests.Session) -> BytesIO:
+        """Private function that calls an API endpoint and returns the data as a bytes object in memory.
+
+        Args:
+            url (Union[FusionCredentials, Union[str, dict]): URL for an API endpoint with valid parameters.
+            session (requests.Session): Specify a proxy if required to access the authentication server. Defaults to {}.
+
+        Returns:
+            io.BytesIO: in memory file content
+        """
+
+        response = session.get(url)
+        response.raise_for_status()
+
+        return BytesIO(response.content)
+
+    def __init__(
+        self,
+        credentials: str | FusionCredentials = "config/client_credentials.json",
+        root_url: str = "https://fusion.jpmorgan.com/api/v1/",
+        download_folder: str = "downloads",
+        log_level: int = logging.ERROR,
+        fs: fsspec.filesystem = None,
+        log_path: str = ".",
+    ) -> None:
+        """Constructor to instantiate a new Fusion object.
+
+        Args:
+            credentials (Union[str, FusionCredentials]): A path to a credentials file or a fully populated
+            FusionCredentials object. Defaults to 'config/client_credentials.json'.
+            root_url (_type_, optional): The API root URL.
+                Defaults to "https://fusion.jpmorgan.com/api/v1/".
+            download_folder (str, optional): The folder path where downloaded data files
+                are saved. Defaults to "downloads".
+            log_level (int, optional): Set the logging level. Defaults to logging.ERROR.
+            fs (fsspec.filesystem): filesystem.
+            log_path (str, optional): The folder path where the log is stored.
+        """
+        self._default_catalog = "common"
+
+        self.root_url = root_url
+        self.download_folder = download_folder
+        Path(download_folder).mkdir(parents=True, exist_ok=True)
+
+        if logger.hasHandlers():
+            logger.handlers.clear()
+        file_handler = logging.FileHandler(filename=f"{log_path}/fusion_sdk.log")
+        logging.addLevelName(VERBOSE_LVL, "VERBOSE")
+        stdout_handler = logging.StreamHandler(sys.stdout)
+        formatter = logging.Formatter(
+            "%(asctime)s.%(msecs)03d %(name)s:%(levelname)s %(message)s",
+            datefmt="%Y-%m-%d %H:%M:%S",
+        )
+        stdout_handler.setFormatter(formatter)
+        logger.addHandler(stdout_handler)
+        logger.addHandler(file_handler)
+        logger.setLevel(log_level)
+
+        if isinstance(credentials, FusionCredentials):
+            self.credentials = credentials
+        elif isinstance(credentials, str):
+            self.credentials = FusionCredentials.from_file(Path(credentials))
+        else:
+            raise ValueError("credentials must be a path to a credentials file or FusionCredentials object")
+
+        self.session = get_session(self.credentials, self.root_url)
+        self.fs = fs if fs else get_default_fs()
+        self.events: pd.DataFrame | None = None
+
+    def __repr__(self) -> str:
+        """Object representation to list all available methods."""
+        return "Fusion object \nAvailable methods:\n" + tabulate(
+            pd.DataFrame(  # type: ignore
+                [
+                    [
+                        method_name
+                        for method_name in dir(Fusion)
+                        if callable(getattr(Fusion, method_name)) and not method_name.startswith("_")
+                    ]
+                    + [p for p in dir(Fusion) if isinstance(getattr(Fusion, p), property)],
+                    [
+                        getattr(Fusion, method_name).__doc__.split("\n")[0]
+                        for method_name in dir(Fusion)
+                        if callable(getattr(Fusion, method_name)) and not method_name.startswith("_")
+                    ]
+                    + [
+                        getattr(Fusion, p).__doc__.split("\n")[0]
+                        for p in dir(Fusion)
+                        if isinstance(getattr(Fusion, p), property)
+                    ],
+                ]
+            ).T.set_index(0),
+            tablefmt="psql",
+        )
+
+    @property
+    def default_catalog(self) -> str:
+        """Returns the default catalog.
+
+        Returns:
+            None
+        """
+        return self._default_catalog
+
+    @default_catalog.setter
+    def default_catalog(self, catalog: str) -> None:
+        """Allow the default catalog, which is "common" to be overridden.
+
+        Args:
+            catalog (str): The catalog to use as the default
+
+        Returns:
+            None
+        """
+        self._default_catalog = catalog
+
+    def _use_catalog(self, catalog: str | None) -> str:
+        """Determine which catalog to use in an API call.
+
+        Args:
+            catalog (str): The catalog value passed as an argument to an API function wrapper.
+
+        Returns:
+            str: The catalog to use
+        """
+        if catalog is None:
+            return self.default_catalog
+
+        return catalog
+
+    def get_fusion_filesystem(self) -> FusionHTTPFileSystem:
+        """Creates Fusion Filesystem.
+
+        Returns: Fusion Filesystem
+
+        """
+        return FusionHTTPFileSystem(client_kwargs={"root_url": self.root_url, "credentials": self.credentials})
+
+    def list_catalogs(self, output: bool = False) -> pd.DataFrame:
+        """Lists the catalogs available to the API account.
+
+        Args:
+            output (bool, optional): If True then print the dataframe. Defaults to False.
+
+        Returns:
+            class:`pandas.DataFrame`: A dataframe with a row for each catalog
+        """
+        url = f"{self.root_url}catalogs/"
+        cat_df = Fusion._call_for_dataframe(url, self.session)
+
+        if output:
+            pass
+
+        return cat_df
+
+    def catalog_resources(self, catalog: str | None = None, output: bool = False) -> pd.DataFrame:
+        """List the resources contained within the catalog, for example products and datasets.
+
+        Args:
+            catalog (str, optional): A catalog identifier. Defaults to 'common'.
+            output (bool, optional): If True then print the dataframe. Defaults to False.
+
+        Returns:
+           class:`pandas.DataFrame`: A dataframe with a row for each resource within the catalog
+        """
+        catalog = self._use_catalog(catalog)
+
+        url = f"{self.root_url}catalogs/{catalog}"
+        cat_df = Fusion._call_for_dataframe(url, self.session)
+
+        if output:
+            pass
+
+        return cat_df
+
+    def list_products(
+        self,
+        contains: str | list[str] | None = None,
+        id_contains: bool = False,
+        catalog: str | None = None,
+        output: bool = False,
+        max_results: int = -1,
+        display_all_columns: bool = False,
+    ) -> pd.DataFrame:
+        """Get the products contained in a catalog. A product is a grouping of datasets.
+
+        Args:
+            contains (Union[str, list], optional): A string or a list of strings that are product
+                identifiers to filter the products list. If a list is provided then it will return
+                products whose identifier matches any of the strings. Defaults to None.
+            id_contains (bool): Filter datasets only where the string(s) are contained in the identifier,
+                ignoring description.
+            catalog (str, optional): A catalog identifier. Defaults to 'common'.
+            output (bool, optional): If True then print the dataframe. Defaults to False.
+            max_results (int, optional): Limit the number of rows returned in the dataframe.
+                Defaults to -1 which returns all results.
+            display_all_columns (bool, optional): If True displays all columns returned by the API,
+                otherwise only the key columns are displayed
+
+        Returns:
+            class:`pandas.DataFrame`: a dataframe with a row for each product
+        """
+        catalog = self._use_catalog(catalog)
+
+        url = f"{self.root_url}catalogs/{catalog}/products"
+        full_prod_df: pd.DataFrame = Fusion._call_for_dataframe(url, self.session)
+
+        if contains:
+            if isinstance(contains, list):
+                contains = "|".join(f"{s}" for s in contains)
+            if id_contains:
+                filtered_df = full_prod_df[full_prod_df["identifier"].str.contains(contains, case=False)]
+            else:
+                filtered_df = full_prod_df[
+                    full_prod_df["identifier"].str.contains(contains, case=False)
+                    | full_prod_df["description"].str.contains(contains, case=False)
+                ]
+        else:
+            filtered_df = full_prod_df
+
+        filtered_df["category"] = filtered_df.category.str.join(", ")
+        filtered_df["region"] = filtered_df.region.str.join(", ")
+        if not display_all_columns:
+            filtered_df = filtered_df[
+                filtered_df.columns.intersection(
+                    [
+                        "identifier",
+                        "title",
+                        "region",
+                        "category",
+                        "status",
+                        "description",
+                    ]
+                )
+            ]
+
+        if max_results > -1:
+            filtered_df = filtered_df[0:max_results]
+
+        if output:
+            pass
+
+        return filtered_df
+
+    def list_datasets(  # noqa: PLR0913
+        self,
+        contains: str | list[str] | None = None,
+        id_contains: bool = False,
+        product: str | list[str] | None = None,
+        catalog: str | None = None,
+        output: bool = False,
+        max_results: int = -1,
+        display_all_columns: bool = False,
+        status: str | None = None,
+        dataset_type: str | None = None,
+    ) -> pd.DataFrame:
+        """Get the datasets contained in a catalog.
+
+        Args:
+            contains (Union[str, list], optional): A string or a list of strings that are dataset
+                identifiers to filter the datasets list. If a list is provided then it will return
+                datasets whose identifier matches any of the strings. Defaults to None.
+            id_contains (bool): Filter datasets only where the string(s) are contained in the identifier,
+                ignoring description.
+            product (Union[str, list], optional): A string or a list of strings that are product
+                identifiers to filter the datasets list. Defaults to None.
+            catalog (str, optional): A catalog identifier. Defaults to 'common'.
+            output (bool, optional): If True then print the dataframe. Defaults to False.
+            max_results (int, optional): Limit the number of rows returned in the dataframe.
+                Defaults to -1 which returns all results.
+            display_all_columns (bool, optional): If True displays all columns returned by the API,
+                otherwise only the key columns are displayed
+            status (str, optional): filter the datasets by status, default is to show all results.
+            dataset_type (str, optional): filter the datasets by type, default is to show all results.
+
+        Returns:
+            class:`pandas.DataFrame`: a dataframe with a row for each dataset.
+        """
+        catalog = self._use_catalog(catalog)
+
+        url = f"{self.root_url}catalogs/{catalog}/datasets"
+        ds_df = Fusion._call_for_dataframe(url, self.session)
+
+        if contains:
+            if isinstance(contains, list):
+                contains = "|".join(f"{s}" for s in contains)
+            if id_contains:
+                ds_df = ds_df[ds_df["identifier"].str.contains(contains, case=False)]
+            else:
+                ds_df = ds_df[
+                    ds_df["identifier"].str.contains(contains, case=False)
+                    | ds_df["description"].str.contains(contains, case=False)
+                ]
+
+        if product:
+            url = f"{self.root_url}catalogs/{catalog}/productDatasets"
+            prd_df = Fusion._call_for_dataframe(url, self.session)
+            prd_df = (
+                prd_df[prd_df["product"] == product]
+                if isinstance(product, str)
+                else prd_df[prd_df["product"].isin(product)]
+            )
+            ds_df = ds_df[ds_df["identifier"].str.lower().isin(prd_df["dataset"].str.lower())].reset_index(drop=True)
+
+        if max_results > -1:
+            ds_df = ds_df[0:max_results]
+
+        ds_df["category"] = ds_df.category.str.join(", ")
+        ds_df["region"] = ds_df.region.str.join(", ")
+        if not display_all_columns:
+            cols = [
+                "identifier",
+                "title",
+                "containerType",
+                "region",
+                "category",
+                "coverageStartDate",
+                "coverageEndDate",
+                "description",
+                "status",
+                "type",
+            ]
+            cols = [c for c in cols if c in ds_df.columns]
+            ds_df = ds_df[cols]
+
+        if status is not None:
+            ds_df = ds_df[ds_df["status"] == status]
+
+        if dataset_type is not None:
+            ds_df = ds_df[ds_df["type"] == dataset_type]
+
+        if output:
+            pass
+
+        return ds_df
+
+    def dataset_resources(self, dataset: str, catalog: str | None = None, output: bool = False) -> pd.DataFrame:
+        """List the resources available for a dataset, currently this will always be a datasetseries.
+
+        Args:
+            dataset (str): A dataset identifier
+            catalog (str, optional): A catalog identifier. Defaults to 'common'.
+            output (bool, optional): If True then print the dataframe. Defaults to False.
+
+        Returns:
+            class:`pandas.DataFrame`: A dataframe with a row for each resource
+        """
+        catalog = self._use_catalog(catalog)
+
+        url = f"{self.root_url}catalogs/{catalog}/datasets/{dataset}"
+        ds_res_df = Fusion._call_for_dataframe(url, self.session)
+
+        if output:
+            pass
+
+        return ds_res_df
+
+    def list_dataset_attributes(
+        self,
+        dataset: str,
+        catalog: str | None = None,
+        output: bool = False,
+        display_all_columns: bool = False,
+    ) -> pd.DataFrame:
+        """Returns the list of attributes that are in the dataset.
+
+        Args:
+            dataset (str): A dataset identifier
+            catalog (str, optional): A catalog identifier. Defaults to 'common'.
+            output (bool, optional): If True then print the dataframe. Defaults to False.
+            display_all_columns (bool, optional): If True displays all columns returned by the API,
+                otherwise only the key columns are displayed
+
+        Returns:
+            class:`pandas.DataFrame`: A dataframe with a row for each attribute
+        """
+        catalog = self._use_catalog(catalog)
+
+        url = f"{self.root_url}catalogs/{catalog}/datasets/{dataset}/attributes"
+        ds_attr_df = Fusion._call_for_dataframe(url, self.session)
+
+        if "index" in ds_attr_df.columns: 
+            ds_attr_df = ds_attr_df.sort_values(by="index").reset_index(drop=True)
+
+        if not display_all_columns:
+            ds_attr_df = ds_attr_df[
+                ds_attr_df.columns.intersection(
+                    [
+                        "identifier",
+                        "title",
+                        "dataType",
+                        "isDatasetKey",
+                        "description",
+                        "source",
+                    ]
+                )
+            ]
+
+        if output:
+            pass
+
+        return ds_attr_df
+
+    def list_datasetmembers(
+        self,
+        dataset: str,
+        catalog: str | None = None,
+        output: bool = False,
+        max_results: int = -1,
+    ) -> pd.DataFrame:
+        """List the available members in the dataset series.
+
+        Args:
+            dataset (str): A dataset identifier
+            catalog (str, optional): A catalog identifier. Defaults to 'common'.
+            output (bool, optional): If True then print the dataframe. Defaults to False.
+            max_results (int, optional): Limit the number of rows returned in the dataframe.
+                Defaults to -1 which returns all results.
+
+        Returns:
+            class:`pandas.DataFrame`: a dataframe with a row for each dataset member.
+        """
+        catalog = self._use_catalog(catalog)
+
+        url = f"{self.root_url}catalogs/{catalog}/datasets/{dataset}/datasetseries"
+        ds_members_df = Fusion._call_for_dataframe(url, self.session)
+
+        if max_results > -1:
+            ds_members_df = ds_members_df[0:max_results]
+
+        if output:
+            pass
+
+        return ds_members_df
+
+    def datasetmember_resources(
+        self,
+        dataset: str,
+        series: str,
+        catalog: str | None = None,
+        output: bool = False,
+    ) -> pd.DataFrame:
+        """List the available resources for a datasetseries member.
+
+        Args:
+            dataset (str): A dataset identifier
+            series (str): The datasetseries identifier
+            catalog (str, optional): A catalog identifier. Defaults to 'common'.
+            output (bool, optional): If True then print the dataframe. Defaults to False.
+
+        Returns:
+            class:`pandas.DataFrame`: A dataframe with a row for each datasetseries member resource.
+                Currently, this will always be distributions.
+        """
+        catalog = self._use_catalog(catalog)
+
+        url = f"{self.root_url}catalogs/{catalog}/datasets/{dataset}/datasetseries/{series}"
+        ds_mem_res_df = Fusion._call_for_dataframe(url, self.session)
+
+        if output:
+            pass
+
+        return ds_mem_res_df
+
+    def list_distributions(
+        self,
+        dataset: str,
+        series: str,
+        catalog: str | None = None,
+        output: bool = False,
+    ) -> pd.DataFrame:
+        """List the available distributions (downloadable instances of the dataset with a format type).
+
+        Args:
+            dataset (str): A dataset identifier
+            series (str): The datasetseries identifier
+            catalog (str, optional): A catalog identifier. Defaults to 'common'.
+            output (bool, optional): If True then print the dataframe. Defaults to False.
+
+        Returns:
+            class:`pandas.DataFrame`: A dataframe with a row for each distribution.
+        """
+        catalog = self._use_catalog(catalog)
+
+        url = f"{self.root_url}catalogs/{catalog}/datasets/{dataset}/datasetseries/{series}/distributions"
+        distros_df = Fusion._call_for_dataframe(url, self.session)
+
+        if output:
+            pass
+
+        return distros_df
+
+    def _resolve_distro_tuples(
+        self,
+        dataset: str,
+        dt_str: str = "latest",
+        dataset_format: str = "parquet",
+        catalog: str | None = None,
+    ) -> list[tuple[str, str, str, str]]:
+        """Resolve distribution tuples given specification params.
+
+        A private utility function to generate a list of distribution tuples.
+        Each tuple is a distribution, identified by catalog, dataset id,
+        datasetseries member id, and the file format.
+
+        Args:
+            dataset (str): A dataset identifier
+            dt_str (str, optional): Either a single date or a range identified by a start or end date,
+                or both separated with a ":". Defaults to 'latest' which will return the most recent
+                instance of the dataset.
+            dataset_format (str, optional): The file format, e.g. CSV or Parquet. Defaults to 'parquet'.
+            catalog (str, optional): A catalog identifier. Defaults to 'common'.
+
+        Returns:
+            list: a list of tuples, one for each distribution
+        """
+        catalog = self._use_catalog(catalog)
+
+        datasetseries_list = self.list_datasetmembers(dataset, catalog)
+        if len(datasetseries_list) == 0:
+            raise AssertionError(f"There are no dataset members for dataset {dataset} in catalog {catalog}")
+
+        if datasetseries_list.empty:
+            raise APIResponseError(  # pragma: no cover
+                f"No data available for dataset {dataset}. "
+                f"Check that a valid dataset identifier and date/date range has been set."
+            )
+
+        if dt_str == "latest":
+            dt_str = (
+                datasetseries_list[
+                    datasetseries_list["createdDate"] == datasetseries_list["createdDate"].to_numpy().max()
+                ]
+                .sort_values(by="identifier")
+                .iloc[-1]["identifier"]
+            )
+            datasetseries_list = datasetseries_list[datasetseries_list["identifier"] == dt_str]
+        else:
+            parsed_dates = normalise_dt_param_str(dt_str)
+            if len(parsed_dates) == 1:
+                parsed_dates = (parsed_dates[0], parsed_dates[0])
+
+            if parsed_dates[0]:
+                datasetseries_list = datasetseries_list[
+                    pd.Series([pd.to_datetime(i, errors="coerce") for i in datasetseries_list["identifier"]])
+                    >= pd.to_datetime(parsed_dates[0])
+                ].reset_index()
+
+            if parsed_dates[1]:
+                datasetseries_list = datasetseries_list[
+                    pd.Series([pd.to_datetime(i, errors="coerce") for i in datasetseries_list["identifier"]])
+                    <= pd.to_datetime(parsed_dates[1])
+                ].reset_index()
+
+        if len(datasetseries_list) == 0:
+            raise APIResponseError(  # pragma: no cover
+                f"No data available for dataset {dataset} in catalog {catalog}.\n"
+                f"Check that a valid dataset identifier and date/date range has been set."
+            )
+
+        required_series = list(datasetseries_list["@id"])
+        tups = [(catalog, dataset, series, dataset_format) for series in required_series]
+
+        return tups
+
    def download(  # noqa: PLR0912, PLR0913
        self,
        dataset: str,
        dt_str: str = "latest",
        dataset_format: str = "parquet",
        catalog: str | None = None,
        n_par: int | None = None,
        show_progress: bool = True,
        force_download: bool = False,
        download_folder: str | None = None,
        return_paths: bool = False,
        partitioning: str | None = None,
        preserve_original_name: bool = False,
    ) -> list[tuple[bool, str, str | None]] | None:
        """Downloads the requested distributions of a dataset to disk.

        Args:
            dataset (str): A dataset identifier
            dt_str (str, optional): Either a single date or a range identified by a start or end date,
                or both separated with a ":". Defaults to 'latest' which will return the most recent
                instance of the dataset. If more than one series member exists on the latest date, the
                series member identifiers will be sorted alphabetically and the last one will be downloaded.
            dataset_format (str, optional): The file format, e.g. CSV or Parquet. Defaults to 'parquet'.
            catalog (str, optional): A catalog identifier. Defaults to 'common'.
            n_par (int, optional): Specify how many distributions to download in parallel.
                Defaults to all cpus available.
            show_progress (bool, optional): Display a progress bar during data download Defaults to True.
            force_download (bool, optional): If True then will always download a file even
                if it is already on disk. Defaults to False.
            download_folder (str, optional): The path, absolute or relative, where downloaded files are saved.
                Defaults to download_folder as set in __init__
            return_paths (bool, optional): Return paths and success statuses of the downloaded files.
            partitioning (str, optional): Partitioning specification.
            preserve_original_name (bool, optional): Preserve the original name of the file. Defaults to False.

        Returns:
            list or None: when ``return_paths`` is True, one tuple per attempted download —
                per the return annotation a ``(success, path, optional error)`` triple
                (the third element's exact content comes from the filesystem download
                helper; verify against its implementation). Otherwise None.
        """
        catalog = self._use_catalog(catalog)

        # Accepts a single YYYYMMDD date, or an optionally open-ended
        # "YYYYMMDD:YYYYMMDD" range (either endpoint may be omitted).
        valid_date_range = re.compile(r"^(\d{4}\d{2}\d{2})$|^((\d{4}\d{2}\d{2})?([:])(\d{4}\d{2}\d{2})?)$")

        if valid_date_range.match(dt_str) or dt_str == "latest":
            # Resolve dates/ranges to concrete (catalog, dataset, series, format) tuples.
            required_series = self._resolve_distro_tuples(dataset, dt_str, dataset_format, catalog)
        else:
            # sample data is limited to csv
            if dt_str == "sample":
                # NOTE(review): this assigns the first distribution's "identifier"
                # as the format — presumably identifiers here name the format;
                # confirm against the distributions endpoint.
                dataset_format = self.list_distributions(dataset, dt_str, catalog)["identifier"].iloc[0]
            # A non-date dt_str is treated as a literal series member identifier.
            required_series = [(catalog, dataset, dt_str, dataset_format)]

        if dataset_format not in RECOGNIZED_FORMATS + ["raw"]:
            raise ValueError(f"Dataset format {dataset_format} is not supported")

        if not download_folder:
            download_folder = self.download_folder

        # One target folder per distribution; all identical unless hive partitioning applies.
        download_folders = [download_folder] * len(required_series)

        if partitioning == "hive":
            # Build <root>/<catalog>/<dataset>/<member> style folder layout.
            members = [series[2].strip("/") for series in required_series]
            download_folders = [
                f"{download_folders[i]}/{series[0]}/{series[1]}/{members[i]}"
                for i, series in enumerate(required_series)
            ]

        # Ensure every target folder exists before parallel downloads begin.
        for d in download_folders:
            if not self.fs.exists(d):
                self.fs.mkdir(d, create_parents=True)

        n_par = cpu_count(n_par)
        # One keyword-argument spec per distribution, consumed by the
        # filesystem download helper below.
        download_spec = [
            {
                "lfs": self.fs,
                "rpath": distribution_to_url(
                    self.root_url,
                    series[1],
                    series[2],
                    series[3],
                    series[0],
                    is_download=True,
                ),
                "lpath": distribution_to_filename(
                    download_folders[i],
                    series[1],
                    series[2],
                    series[3],
                    series[0],
                    partitioning=partitioning,
                ),
                "overwrite": force_download,
                "preserve_original_name": preserve_original_name,
            }
            for i, series in enumerate(required_series)
        ]

        logger.log(
            VERBOSE_LVL,
            f"Beginning {len(download_spec)} downloads in batches of {n_par}",
        )
        # Same parallel download either way; the progress-bar context manager is
        # only attached when show_progress is set.
        if show_progress:
            with joblib_progress("Downloading", total=len(download_spec)):
                res = Parallel(n_jobs=n_par)(
                    delayed(self.get_fusion_filesystem().download)(**spec) for spec in download_spec
                )
        else:
            res = Parallel(n_jobs=n_par)(
                delayed(self.get_fusion_filesystem().download)(**spec) for spec in download_spec
            )

        # Surface each failed download as a warning rather than raising.
        if (len(res) > 0) and (not all(r[0] for r in res)):
            for r in res:
                if not r[0]:
                    warnings.warn(f"The download of {r[1]} was not successful", stacklevel=2)
        return res if return_paths else None
+
+    def to_df(  # noqa: PLR0913
+        self,
+        dataset: str,
+        dt_str: str = "latest",
+        dataset_format: str = "parquet",
+        catalog: str | None = None,
+        n_par: int | None = None,
+        show_progress: bool = True,
+        columns: list[str] | None = None,
+        filters: PyArrowFilterT | None = None,
+        force_download: bool = False,
+        download_folder: str | None = None,
+        dataframe_type: str = "pandas",
+        **kwargs: Any,
+    ) -> pd.DataFrame:
+        """Gets distributions for a specified date or date range and returns the data as a dataframe.
+
+        Args:
+            dataset (str): A dataset identifier
+            dt_str (str, optional): Either a single date or a range identified by a start or end date,
+                or both separated with a ":". Defaults to 'latest' which will return the most recent
+                instance of the dataset.
+            dataset_format (str, optional): The file format, e.g. CSV or Parquet. Defaults to 'parquet'.
+            catalog (str, optional): A catalog identifier. Defaults to 'common'.
+            n_par (int, optional): Specify how many distributions to download in parallel.
+                Defaults to all cpus available.
+            show_progress (bool, optional): Display a progress bar during data download Defaults to True.
+            columns (List, optional): A list of columns to return from a parquet file. Defaults to None
+            filters (List, optional): List[Tuple] or List[List[Tuple]] or None (default)
+                Rows which do not match the filter predicate will be removed from scanned data.
+                Partition keys embedded in a nested directory structure will be exploited to avoid
+                loading files at all if they contain no matching rows. If use_legacy_dataset is True,
+                filters can only reference partition keys and only a hive-style directory structure
+                is supported. When setting use_legacy_dataset to False, also within-file level filtering
+                and different partitioning schemes are supported.
+                More on https://arrow.apache.org/docs/python/generated/pyarrow.parquet.ParquetDataset.html
+            force_download (bool, optional): If True then will always download a file even
+                if it is already on disk. Defaults to False.
+            download_folder (str, optional): The path, absolute or relative, where downloaded files are saved.
+                Defaults to download_folder as set in __init__
+            dataframe_type (str, optional): Type
+        Returns:
+            class:`pandas.DataFrame`: a dataframe containing the requested data.
+                If multiple dataset instances are retrieved then these are concatenated first.
+        """
+        catalog = self._use_catalog(catalog)
+
+        # sample data is limited to csv
+        if dt_str == "sample":
+            dataset_format = "csv"
+
+        if not download_folder:
+            download_folder = self.download_folder
+        download_res = self.download(
+            dataset,
+            dt_str,
+            dataset_format,
+            catalog,
+            n_par,
+            show_progress,
+            force_download,
+            download_folder,
+            return_paths=True,
+        )
+
+        if not download_res:
+            raise ValueError("Must specify 'return_paths=True' in download call to use this function")
+
+        if not all(res[0] for res in download_res):
+            failed_res = [res for res in download_res if not res[0]]
+            raise Exception(
+                f"Not all downloads were successfully completed. "
+                f"Re-run to collect missing files. The following failed:\n{failed_res}"
+            )
+
+        files = [res[1] for res in download_res]
+
+        pd_read_fn_map = {
+            "csv": read_csv,
+            "parquet": read_parquet,
+            "parq": read_parquet,
+            "json": read_json,
+            "raw": read_csv,
+        }
+
+        pd_read_default_kwargs: dict[str, dict[str, object]] = {
+            "csv": {
+                "columns": columns,
+                "filters": filters,
+                "fs": self.fs,
+                "dataframe_type": dataframe_type,
+            },
+            "parquet": {
+                "columns": columns,
+                "filters": filters,
+                "fs": self.fs,
+                "dataframe_type": dataframe_type,
+            },
+            "json": {
+                "columns": columns,
+                "filters": filters,
+                "fs": self.fs,
+                "dataframe_type": dataframe_type,
+            },
+            "raw": {
+                "columns": columns,
+                "filters": filters,
+                "fs": self.fs,
+                "dataframe_type": dataframe_type,
+            },
+        }
+
+        pd_read_default_kwargs["parq"] = pd_read_default_kwargs["parquet"]
+
+        pd_reader = pd_read_fn_map.get(dataset_format)
+        pd_read_kwargs = pd_read_default_kwargs.get(dataset_format, {})
+        if not pd_reader:
+            raise Exception(f"No pandas function to read file in format {dataset_format}")
+
+        pd_read_kwargs.update(kwargs)
+
+        if len(files) == 0:
+            raise APIResponseError(
+                f"No series members for dataset: {dataset} "
+                f"in date or date range: {dt_str} and format: {dataset_format}"
+            )
+        if dataset_format in ["parquet", "parq"]:
+            data_df = pd_reader(files, **pd_read_kwargs)  # type: ignore
+        elif dataset_format == "raw":
+            dataframes = (
+                pd.concat(
+                    [pd_reader(ZipFile(f).open(p), **pd_read_kwargs) for p in ZipFile(f).namelist()],  # type: ignore
+                    ignore_index=True,
+                )
+                for f in files
+            )
+            data_df = pd.concat(dataframes, ignore_index=True)
+        else:
+            dataframes = (pd_reader(f, **pd_read_kwargs) for f in files)  # type: ignore
+            if dataframe_type == "pandas":
+                data_df = pd.concat(dataframes, ignore_index=True)
+            if dataframe_type == "polars":
+                import polars as pl
+
+                data_df = pl.concat(dataframes, how="diagonal")  # type: ignore
+
+        return data_df
+
+    def to_bytes(
+        self,
+        dataset: str,
+        series_member: str,
+        dataset_format: str = "parquet",
+        catalog: str | None = None,
+    ) -> BytesIO:
+        """Returns an instance of dataset (the distribution) as a bytes object.
+
+        Args:
+            dataset (str): A dataset identifier
+            series_member (str,): A dataset series member identifier
+            dataset_format (str, optional): The file format, e.g. CSV or Parquet. Defaults to 'parquet'.
+            catalog (str, optional): A catalog identifier. Defaults to 'common'.
+        """
+
+        catalog = self._use_catalog(catalog)
+
+        url = distribution_to_url(
+            self.root_url,
+            dataset,
+            series_member,
+            dataset_format,
+            catalog,
+        )
+
+        return Fusion._call_for_bytes_object(url, self.session)
+
+    def to_table(  # noqa: PLR0913
+        self,
+        dataset: str,
+        dt_str: str = "latest",
+        dataset_format: str = "parquet",
+        catalog: str | None = None,
+        n_par: int | None = None,
+        show_progress: bool = True,
+        columns: list[str] | None = None,
+        filters: PyArrowFilterT | None = None,
+        force_download: bool = False,
+        download_folder: str | None = None,
+        **kwargs: Any,
+    ) -> pa.Table:
+        """Gets distributions for a specified date or date range and returns the data as an arrow table.
+
+        Args:
+            dataset (str): A dataset identifier
+            dt_str (str, optional): Either a single date or a range identified by a start or end date,
+                or both separated with a ":". Defaults to 'latest' which will return the most recent
+                instance of the dataset.
+            dataset_format (str, optional): The file format, e.g. CSV or Parquet. Defaults to 'parquet'.
+            catalog (str, optional): A catalog identifier. Defaults to 'common'.
+            n_par (int, optional): Specify how many distributions to download in parallel.
+                Defaults to all cpus available.
+            show_progress (bool, optional): Display a progress bar during data download Defaults to True.
+            columns (List, optional): A list of columns to return from a parquet file. Defaults to None
+            filters (List, optional): List[Tuple] or List[List[Tuple]] or None (default)
+                Rows which do not match the filter predicate will be removed from scanned data.
+                Partition keys embedded in a nested directory structure will be exploited to avoid
+                loading files at all if they contain no matching rows. If use_legacy_dataset is True,
+                filters can only reference partition keys and only a hive-style directory structure
+                is supported. When setting use_legacy_dataset to False, also within-file level filtering
+                and different partitioning schemes are supported.
+                More on https://arrow.apache.org/docs/python/generated/pyarrow.parquet.ParquetDataset.html
+            force_download (bool, optional): If True then will always download a file even
+                if it is already on disk. Defaults to False.
+            download_folder (str, optional): The path, absolute or relative, where downloaded files are saved.
+                Defaults to download_folder as set in __init__
+        Returns:
+            class:`pyarrow.Table`: a dataframe containing the requested data.
+                If multiple dataset instances are retrieved then these are concatenated first.
+        """
+        catalog = self._use_catalog(catalog)
+        n_par = cpu_count(n_par)
+        if not download_folder:
+            download_folder = self.download_folder
+        download_res = self.download(
+            dataset,
+            dt_str,
+            dataset_format,
+            catalog,
+            n_par,
+            show_progress,
+            force_download,
+            download_folder,
+            return_paths=True,
+        )
+
+        if not download_res:
+            raise ValueError("Must specify 'return_paths=True' in download call to use this function")
+
+        if not all(res[0] for res in download_res):
+            failed_res = [res for res in download_res if not res[0]]
+            raise RuntimeError(
+                f"Not all downloads were successfully completed. "
+                f"Re-run to collect missing files. The following failed:\n{failed_res}"
+            )
+
+        files = [res[1] for res in download_res]
+
+        read_fn_map = {
+            "csv": csv_to_table,
+            "parquet": parquet_to_table,
+            "parq": parquet_to_table,
+            "json": json_to_table,
+            "raw": csv_to_table,
+        }
+
+        read_default_kwargs: dict[str, dict[str, object]] = {
+            "csv": {"columns": columns, "filters": filters, "fs": self.fs},
+            "parquet": {"columns": columns, "filters": filters, "fs": self.fs},
+            "json": {"columns": columns, "filters": filters, "fs": self.fs},
+            "raw": {"columns": columns, "filters": filters, "fs": self.fs},
+        }
+
+        read_default_kwargs["parq"] = read_default_kwargs["parquet"]
+
+        reader = read_fn_map.get(dataset_format)
+        read_kwargs = read_default_kwargs.get(dataset_format, {})
+        if not reader:
+            raise AssertionError(f"No function to read file in format {dataset_format}")
+
+        read_kwargs.update(kwargs)
+
+        if len(files) == 0:
+            raise APIResponseError(
+                f"No series members for dataset: {dataset} "
+                f"in date or date range: {dt_str} and format: {dataset_format}"
+            )
+        if dataset_format in ["parquet", "parq"]:
+            tbl = reader(files, **read_kwargs)  # type: ignore
+        else:
+            tbl = (reader(f, **read_kwargs) for f in files)  # type: ignore
+            tbl = pa.concat_tables(tbl)
+
+        return tbl
+
+    def upload(  # noqa: PLR0913
+        self,
+        path: str,
+        dataset: str | None = None,
+        dt_str: str = "latest",
+        catalog: str | None = None,
+        n_par: int | None = None,
+        show_progress: bool = True,
+        return_paths: bool = False,
+        multipart: bool = True,
+        chunk_size: int = 5 * 2**20,
+        from_date: str | None = None,
+        to_date: str | None = None,
+        preserve_original_name: bool | None = False,
+        additional_headers: dict[str, str] | None = None,
+    ) -> list[tuple[bool, str, str | None]] | None:
+        """Uploads the requested files/files to Fusion.
+
+        Args:
+            path (str): path to a file or a folder with files
+            dataset (str, optional): Dataset identifier to which the file will be uploaded (for single file only).
+                                    If not provided the dataset will be implied from file's name.
+            dt_str (str, optional): A file name. Can be any string but is usually a date.
+                                    Defaults to 'latest' which will return the most recent.
+                                    Relevant for a single file upload only. If not provided the dataset will
+                                    be implied from file's name.
+            catalog (str, optional): A catalog identifier. Defaults to 'common'.
+            n_par (int, optional): Specify how many distributions to download in parallel.
+                Defaults to all cpus available.
+            show_progress (bool, optional): Display a progress bar during data download Defaults to True.
+            return_paths (bool, optional): Return paths and success statuses of the downloaded files.
+            multipart (bool, optional): Is multipart upload.
+            chunk_size (int, optional): Maximum chunk size.
+            from_date (str, optional): start of the data date range contained in the distribution,
+                defaults to upoad date
+            to_date (str, optional): end of the data date range contained in the distribution,
+                defaults to upload date.
+            preserve_original_name (bool, optional): Preserve the original name of the file. Defaults to False.
+
+        Returns:
+
+
+        """
+        catalog = self._use_catalog(catalog)
+
+        if not self.fs.exists(path):
+            raise RuntimeError("The provided path does not exist")
+
+        fs_fusion = self.get_fusion_filesystem()
+        if self.fs.info(path)["type"] == "directory":
+            file_path_lst = self.fs.find(path)
+            local_file_validation = validate_file_names(file_path_lst, fs_fusion)
+            file_path_lst = [f for flag, f in zip(local_file_validation, file_path_lst) if flag]
+            file_name = [f.split("/")[-1] for f in file_path_lst]
+            is_raw_lst = is_dataset_raw(file_path_lst, fs_fusion)
+            local_url_eqiv = [path_to_url(i, r) for i, r in zip(file_path_lst, is_raw_lst)]
+        else:
+            file_path_lst = [path]
+            if not catalog or not dataset:
+                local_file_validation = validate_file_names(file_path_lst, fs_fusion)
+                file_path_lst = [f for flag, f in zip(local_file_validation, file_path_lst) if flag]
+                is_raw_lst = is_dataset_raw(file_path_lst, fs_fusion)
+                local_url_eqiv = [path_to_url(i, r) for i, r in zip(file_path_lst, is_raw_lst)]
+                if preserve_original_name:
+                    raise ValueError("preserve_original_name can only be used when catalog and dataset are provided.")
+            else:
+                date_identifier = re.compile(r"^(\d{4})(\d{2})(\d{2})$")
+                if date_identifier.match(dt_str):
+                    dt_str = dt_str if dt_str != "latest" else pd.Timestamp("today").date().strftime("%Y%m%d")
+                    dt_str = pd.Timestamp(dt_str).date().strftime("%Y%m%d")
+
+                if catalog not in fs_fusion.ls("") or dataset not in [
+                    i.split("/")[-1] for i in fs_fusion.ls(f"{catalog}/datasets")
+                ]:
+                    msg = (
+                        f"File file has not been uploaded, one of the catalog: {catalog} "
+                        f"or dataset: {dataset} does not exit."
+                    )
+                    warnings.warn(msg, stacklevel=2)
+                    return [(False, path, msg)]
+                file_format = path.split(".")[-1]
+                file_name = [path.split("/")[-1]]
+                file_format = "raw" if file_format not in RECOGNIZED_FORMATS else file_format
+
+                local_url_eqiv = [
+                    "/".join(distribution_to_url("", dataset, dt_str, file_format, catalog, False).split("/")[1:])
+                ]
+
+        if not preserve_original_name:
+            data_map_df = pd.DataFrame([file_path_lst, local_url_eqiv]).T
+            data_map_df.columns = pd.Index(["path", "url"])
+        else:
+            data_map_df = pd.DataFrame([file_path_lst, local_url_eqiv, file_name]).T
+            data_map_df.columns = pd.Index(["path", "url", "file_name"])
+
+        n_par = cpu_count(n_par)
+        parallel = len(data_map_df) > 1
+        res = upload_files(
+            fs_fusion,
+            self.fs,
+            data_map_df,
+            parallel=parallel,
+            n_par=n_par,
+            multipart=multipart,
+            chunk_size=chunk_size,
+            show_progress=show_progress,
+            from_date=from_date,
+            to_date=to_date,
+            additional_headers=additional_headers,
+        )
+
+        if not all(r[0] for r in res):
+            failed_res = [r for r in res if not r[0]]
+            msg = f"Not all uploads were successfully completed. The following failed:\n{failed_res}"
+            logger.warning(msg)
+            warnings.warn(msg, stacklevel=2)
+
+        return res if return_paths else None
+
+    def from_bytes(  # noqa: PLR0913
+        self,
+        data: BytesIO,
+        dataset: str,
+        series_member: str = "latest",
+        catalog: str | None = None,
+        distribution: str = "parquet",
+        show_progress: bool = True,
+        return_paths: bool = False,
+        chunk_size: int = 5 * 2**20,
+        from_date: str | None = None,
+        to_date: str | None = None,
+        file_name: str | None = None,
+        **kwargs: Any,  # noqa: ARG002
+    ) -> list[tuple[bool, str, str | None]] | None:
+        """Uploads data from an object in memory.
+
+        Args:
+            data (str): an object in memory to upload
+            dataset (str): Dataset name to which the bytes will be uploaded.
+            series_member (str, optional): A single date or label. Defaults to 'latest' which will return
+                the most recent.
+            catalog (str, optional): A catalog identifier. Defaults to 'common'.
+            distribution (str, optional): A distribution type, e.g. a file format or raw
+            show_progress (bool, optional): Display a progress bar during data download Defaults to True.
+            return_paths (bool, optional): Return paths and success statuses of the downloaded files.
+            chunk_size (int, optional): Maximum chunk size.
+            from_date (str, optional): start of the data date range contained in the distribution,
+                defaults to upload date
+            to_date (str, optional): end of the data date range contained in the distribution, defaults to upload date.
+            file_name (str, optional): file name to be used for the uploaded file. Defaults to Fusion standard naming.
+
+        Returns:
+            Optional[list[tuple[bool, str, Optional[str]]]: a list of tuples, one for each distribution
+
+        """
+        catalog = self._use_catalog(catalog)
+
+        fs_fusion = self.get_fusion_filesystem()
+        if distribution not in RECOGNIZED_FORMATS + ["raw"]:
+            raise ValueError(f"Dataset format {distribution} is not supported")
+
+        is_raw = js.loads(fs_fusion.cat(f"{catalog}/datasets/{dataset}"))["isRawData"]
+        local_url_eqiv = path_to_url(f"{dataset}__{catalog}__{series_member}.{distribution}", is_raw)
+
+        data_map_df = pd.DataFrame(["", local_url_eqiv, file_name]).T
+        data_map_df.columns = ["path", "url", "file_name"]  # type: ignore
+
+        res = upload_files(
+            fs_fusion,
+            data,
+            data_map_df,
+            parallel=False,
+            n_par=1,
+            multipart=False,
+            chunk_size=chunk_size,
+            show_progress=show_progress,
+            from_date=from_date,
+            to_date=to_date,
+        )
+
+        if not all(r[0] for r in res):
+            failed_res = [r for r in res if not r[0]]
+            msg = f"Not all uploads were successfully completed. The following failed:\n{failed_res}"
+            logger.warning(msg)
+            warnings.warn(msg, stacklevel=2)
+
+        return res if return_paths else None
+
+    def listen_to_events(
+        self,
+        last_event_id: str | None = None,
+        catalog: str | None = None,
+        url: str = "https://fusion.jpmorgan.com/api/v1/",
+    ) -> None | pd.DataFrame:
+        """Run server sent event listener in the background. Retrieve results by running get_events.
+
+        Args:
+            last_event_id (str): Last event ID (exclusive).
+            catalog (str): catalog.
+            url (str): subscription url.
+        Returns:
+            Union[None, class:`pandas.DataFrame`]: If in_background is True then the function returns no output.
+                If in_background is set to False then pandas DataFrame is output upon keyboard termination.
+        """
+
+        catalog = self._use_catalog(catalog)
+        import asyncio
+        import json
+        import threading
+
+        from aiohttp_sse_client import client as sse_client
+
+        from .utils import get_client
+
+        kwargs: dict[str, Any] = {}
+        if last_event_id:
+            kwargs = {"headers": {"Last-Event-ID": last_event_id}}
+
+        async def async_events() -> None:
+            """Events sync function.
+
+            Returns:
+                None
+            """
+            timeout = 1e100
+            session = await get_client(self.credentials, timeout=timeout)
+            async with sse_client.EventSource(
+                f"{url}catalogs/{catalog}/notifications/subscribe",
+                session=session,
+                **kwargs,
+            ) as messages:
+                lst = []
+                try:
+                    async for msg in messages:
+                        event = json.loads(msg.data)
+                        lst.append(event)
+                        if self.events is None:
+                            self.events = pd.DataFrame()
+                        else:
+                            self.events = pd.concat([self.events, pd.DataFrame(lst)], ignore_index=True)
+                except TimeoutError as ex:
+                    raise ex from None
+                except BaseException:
+                    raise
+
+        _ = self.list_catalogs()  # refresh token
+        if "headers" in kwargs:
+            kwargs["headers"].update({"authorization": f"bearer {self.credentials.bearer_token}"})
+        else:
+            kwargs["headers"] = {
+                "authorization": f"bearer {self.credentials.bearer_token}",
+            }
+        if "http" in self.credentials.proxies:
+            kwargs["proxy"] = self.credentials.proxies["http"]
+        elif "https" in self.credentials.proxies:
+            kwargs["proxy"] = self.credentials.proxies["https"]
+        th = threading.Thread(target=asyncio.run, args=(async_events(),), daemon=True)
+        th.start()
+        return None
+
+    def get_events(
+        self,
+        last_event_id: str | None = None,
+        catalog: str | None = None,
+        in_background: bool = True,
+        url: str = "https://fusion.jpmorgan.com/api/v1/",
+    ) -> None | pd.DataFrame:
+        """Run server sent event listener and print out the new events. Keyboard terminate to stop.
+
+        Args:
+            last_event_id (str): id of the last event.
+            catalog (str): catalog.
+            in_background (bool): execute event monitoring in the background (default = True).
+            url (str): subscription url.
+        Returns:
+            Union[None, class:`pandas.DataFrame`]: If in_background is True then the function returns no output.
+                If in_background is set to False then pandas DataFrame is output upon keyboard termination.
+        """
+
+        catalog = self._use_catalog(catalog)
+        if not in_background:
+            from sseclient import SSEClient
+
+            _ = self.list_catalogs()  # refresh token
+            interrupted = False
+            messages = SSEClient(
+                session=self.session,
+                url=f"{url}catalogs/{catalog}/notifications/subscribe",
+                last_id=last_event_id,
+                headers={
+                    "authorization": f"bearer {self.credentials.bearer_token}",
+                },
+            )
+            lst = []
+            try:
+                for msg in messages:
+                    event = js.loads(msg.data)
+                    if event["type"] != "HeartBeatNotification":
+                        lst.append(event)
+            except KeyboardInterrupt:
+                interrupted = True
+            except Exception as e:
+                raise e
+            finally:
+                result = pd.DataFrame(lst) if interrupted or lst else None
+            return result
+        else:
+            return self.events
+
+    def list_dataset_lineage(
+        self,
+        dataset_id: str,
+        catalog: str | None = None,
+        output: bool = False,
+        max_results: int = -1,
+    ) -> pd.DataFrame:
+        """List the upstream and downstream lineage of the dataset.
+
+        Args:
+            dataset (str): A dataset identifier
+            catalog (str, optional): A catalog identifier. Defaults to 'common'.
+            output (bool, optional): If True then print the dataframe. Defaults to False.
+            max_results (int, optional): Limit the number of rows returned in the dataframe.
+                Defaults to -1 which returns all results.
+
+        Returns:
+            class:`pandas.DataFrame`: A dataframe with a row for each resource
+
+        Raises:
+            HTTPError: If the dataset is not found in the catalog.
+
+        """
+        catalog = self._use_catalog(catalog)
+
+        url_dataset = f"{self.root_url}catalogs/{catalog}/datasets/{dataset_id}"
+        resp_dataset = self.session.get(url_dataset)
+        resp_dataset.raise_for_status()
+
+        url = f"{self.root_url}catalogs/{catalog}/datasets/{dataset_id}/lineage"
+        resp = self.session.get(url)
+        data = resp.json()
+        relations_data = data["relations"]
+
+        restricted_datasets = [
+            dataset_metadata["identifier"]
+            for dataset_metadata in data["datasets"]
+            if dataset_metadata.get("status", None) == "Restricted"
+        ]
+
+        data_dict = {}
+
+        for entry in relations_data:
+            source_dataset_id = entry["source"]["dataset"]
+            source_catalog = entry["source"]["catalog"]
+            destination_dataset_id = entry["destination"]["dataset"]
+            destination_catalog = entry["destination"]["catalog"]
+
+            if destination_dataset_id == dataset_id:
+                for dataset in data["datasets"]:
+                    if dataset["identifier"] == source_dataset_id and dataset.get("status", None) != "Restricted":
+                        source_dataset_title = dataset["title"]
+                    elif dataset["identifier"] == source_dataset_id and dataset.get("status", None) == "Restricted":
+                        source_dataset_title = "Access Restricted"
+                data_dict[source_dataset_id] = (
+                    "source",
+                    source_catalog,
+                    source_dataset_title,
+                )
+
+            if source_dataset_id == dataset_id:
+                for dataset in data["datasets"]:
+                    if dataset["identifier"] == destination_dataset_id and dataset.get("status", None) != "Restricted":
+                        destination_dataset_title = dataset["title"]
+                    elif (
+                        dataset["identifier"] == destination_dataset_id and dataset.get("status", None) == "Restricted"
+                    ):
+                        destination_dataset_title = "Access Restricted"
+                data_dict[destination_dataset_id] = (
+                    "produced",
+                    destination_catalog,
+                    destination_dataset_title,
+                )
+
+        output_data = {
+            "type": [v[0] for v in data_dict.values()],
+            "dataset_identifier": list(data_dict.keys()),
+            "title": [v[2] for v in data_dict.values()],
+            "catalog": [v[1] for v in data_dict.values()],
+        }
+
+        lineage_df = pd.DataFrame(output_data)
+        lineage_df.loc[
+            lineage_df["dataset_identifier"].isin(restricted_datasets),
+            ["dataset_identifier", "catalog", "title"],
+        ] = "Access Restricted"
+
+        if max_results > -1:
+            lineage_df = lineage_df[0:max_results]
+
+        if output:
+            pass
+
+        return lineage_df
+
+    def create_dataset_lineage(
+        self,
+        base_dataset: str,
+        source_dataset_catalog_mapping: pd.DataFrame | list[dict[str, str]],
+        catalog: str | None = None,
+        return_resp_obj: bool = False,
+    ) -> requests.Response | None:
+        """Upload lineage to a dataset.
+
+        Args:
+            base_dataset (str): A dataset identifier to which you want to add lineage.
+            source_dataset_catalog_mapping (Union[pd.DataFrame, list[dict[str]]]): Mapping for the dataset
+                identifier(s) and catalog(s) from which to add lineage.
+            catalog (Optional[str], optional): Catalog identifier. Defaults to None.
+            return_resp_obj (bool, optional): If True then return the response object. Defaults to False.
+
+        Raises:
+            ValueError: If source_dataset_catalog_mapping is not a pandas DataFrame or a list of dictionaries
+            HTTPError: If the request is unsuccessful.
+
+        Examples:
+            Creating lineage from a pandas DataFrame.
+            >>> data = [{"dataset": "a", "catalog": "a"}, {"dataset": "b", "catalog": "b"}]
+            >>> df = pd.DataFrame(data)
+            >>> fusion = Fusion()
+            >>> fusion.create_dataset_lineage(base_dataset="c", source_dataset_catalog_mapping=df, catalog="c")
+
+            Creating lineage from a list of dictionaries.
+            >>> data = [{"dataset": "a", "catalog": "a"}, {"dataset": "b", "catalog": "b"}]
+            >>> fusion = Fusion()
+            >>> fusion.create_dataset_lineage(base_dataset="c", source_dataset_catalog_mapping=data, catalog="c")
+
+        """
+        catalog = self._use_catalog(catalog)
+
+        if isinstance(source_dataset_catalog_mapping, pd.DataFrame):
+            dataset_mapping_list = [
+                {"dataset": row["dataset"], "catalog": row["catalog"]}
+                for _, row in source_dataset_catalog_mapping.iterrows()
+            ]
+        elif isinstance(source_dataset_catalog_mapping, list):
+            dataset_mapping_list = source_dataset_catalog_mapping
+        else:
+            raise ValueError("source_dataset_catalog_mapping must be a pandas DataFrame or a list of dictionaries.")
+        data = {"source": dataset_mapping_list}
+
+        url = f"{self.root_url}catalogs/{catalog}/datasets/{base_dataset}/lineage"
+
+        resp = self.session.post(url, json=data)
+
+        resp.raise_for_status()
+
+        return resp if return_resp_obj else None
+
+    def list_product_dataset_mapping(
+        self,
+        dataset: str | list[str] | None = None,
+        product: str | list[str] | None = None,
+        catalog: str | None = None,
+    ) -> pd.DataFrame:
+        """get the product to dataset linking contained in  a catalog. A product is a grouping of datasets.
+
+        Args:
+            dataset (str | list[str] | None, optional): A string or list of strings that are dataset
+            identifiers to filter the output. If a list is provided then it will return
+            datasets whose identifier matches any of the strings. Defaults to None.
+            product (str | list[str] | None, optional): A string or list of strings that are product
+            identifiers to filter the output. If a list is provided then it will return
+            products whose identifier matches any of the strings. Defaults to None.
+            catalog (str | None, optional): A catalog identifier. Defaults to 'common'.
+
+        Returns:
+            pd.DataFrame: a dataframe with a row  for each dataset to product mapping.
+        """
+        catalog = self._use_catalog(catalog)
+        url = f"{self.root_url}catalogs/{catalog}/productDatasets"
+        mapping_df = pd.DataFrame(self._call_for_dataframe(url, self.session))
+
+        if dataset:
+            if isinstance(dataset, list):
+                contains = "|".join(f"{s}" for s in dataset)
+                mapping_df = mapping_df[mapping_df["dataset"].str.contains(contains, case=False)]
+            if isinstance(dataset, str):
+                mapping_df = mapping_df[mapping_df["dataset"].str.contains(dataset, case=False)]
+        if product:
+            if isinstance(product, list):
+                contains = "|".join(f"{s}" for s in product)
+                mapping_df = mapping_df[mapping_df["product"].str.contains(contains, case=False)]
+            if isinstance(product, str):
+                mapping_df = mapping_df[mapping_df["product"].str.contains(product, case=False)]
+        return mapping_df
+
+    def product(  # noqa: PLR0913
+        self,
+        identifier: str,
+        title: str = "",
+        category: str | list[str] | None = None,
+        short_abstract: str = "",
+        description: str = "",
+        is_active: bool = True,
+        is_restricted: bool | None = None,
+        maintainer: str | list[str] | None = None,
+        region: str | list[str] = "Global",
+        publisher: str = "J.P. Morgan",
+        sub_category: str | list[str] | None = None,
+        tag: str | list[str] | None = None,
+        delivery_channel: str | list[str] = "API",
+        theme: str | None = None,
+        release_date: str | None = None,
+        language: str = "English",
+        status: str = "Available",
+        image: str = "",
+        logo: str = "",
+        dataset: str | list[str] | None = None,
+        **kwargs: Any,
+    ) -> Product:
+        """Instantiate a Product object with this client for metadata creation.
+
+        Args:
+            identifier (str): Product identifier.
+            title (str, optional): Product title. If not provided, defaults to identifier.
+            category (str | list[str] | None, optional): Category. Defaults to None.
+            short_abstract (str, optional): Short description. Defaults to "".
+            description (str, optional): Description. If not provided, defaults to identifier.
+            is_active (bool, optional): Boolean for Active status. Defaults to True.
+            is_restricted (bool | None, optional): Flag for restricted products. Defaults to None.
+            maintainer (str | list[str] | None, optional): Product maintainer. Defaults to None.
+            region (str | list[str] | None, optional): Product region. Defaults to None.
+            publisher (str | None, optional): Name of vendor that publishes the data. Defaults to None.
+            sub_category (str | list[str] | None, optional): Product sub-category. Defaults to None.
+            tag (str | list[str] | None, optional): Tags used for search purposes. Defaults to None.
+            delivery_channel (str | list[str], optional): Product delivery channel. Defaults to "API".
+            theme (str | None, optional): Product theme. Defaults to None.
+            release_date (str | None, optional): Product release date. Defaults to None.
+            language (str, optional): Product language. Defaults to "English".
+            status (str, optional): Product status. Defaults to "Available".
+            image (str, optional): Product image. Defaults to "".
+            logo (str, optional): Product logo. Defaults to "".
+            dataset (str | list[str] | None, optional): Product datasets. Defaults to None.
+
+        Returns:
+            Product: Fusion Product class instance.
+
+        Examples:
+            >>> fusion = Fusion()
+            >>> fusion.product(identifier="PRODUCT_1", title="Product")
+
+        Note:
+            See the product module for more information on functionalities of product objects.
+
+        """
+        product_obj = Product(
+            identifier=identifier,
+            title=title,
+            category=category,
+            short_abstract=short_abstract,
+            description=description,
+            is_active=is_active,
+            is_restricted=is_restricted,
+            maintainer=maintainer,
+            region=region,
+            publisher=publisher,
+            sub_category=sub_category,
+            tag=tag,
+            delivery_channel=delivery_channel,
+            theme=theme,
+            release_date=release_date,
+            language=language,
+            status=status,
+            image=image,
+            logo=logo,
+            dataset=dataset,
+            **kwargs,
+        )
+        product_obj.client = self
+        return product_obj
+
+    def dataset(  # noqa: PLR0913
+        self,
+        identifier: str,
+        title: str = "",
+        category: str | list[str] | None = None,
+        description: str = "",
+        frequency: str = "Once",
+        is_internal_only_dataset: bool = False,
+        is_third_party_data: bool = True,
+        is_restricted: bool | None = None,
+        is_raw_data: bool = True,
+        maintainer: str | None = "J.P. Morgan Fusion",
+        source: str | list[str] | None = None,
+        region: str | list[str] | None = None,
+        publisher: str = "J.P. Morgan",
+        product: str | list[str] | None = None,
+        sub_category: str | list[str] | None = None,
+        tags: str | list[str] | None = None,
+        created_date: str | None = None,
+        modified_date: str | None = None,
+        delivery_channel: str | list[str] = "API",
+        language: str = "English",
+        status: str = "Available",
+        type_: str | None = "Source",
+        container_type: str | None = "Snapshot-Full",
+        snowflake: str | None = None,
+        complexity: str | None = None,
+        is_immutable: bool | None = None,
+        is_mnpi: bool | None = None,
+        is_pci: bool | None = None,
+        is_pii: bool | None = None,
+        is_client: bool | None = None,
+        is_public: bool | None = None,
+        is_internal: bool | None = None,
+        is_confidential: bool | None = None,
+        is_highly_confidential: bool | None = None,
+        is_active: bool | None = None,
+        owners: list[str] | None = None,
+        application_id: str | dict[str, str] | None = None,
+        **kwargs: Any,
+    ) -> Dataset:
+        """Instantiate a Dataset object with this client for metadata creation.
+
+        Args:
+            identifier (str): Dataset identifier.
+            title (str, optional): Dataset title. If not provided, defaults to identifier.
+            category (str | list[str] | None, optional): A category or list of categories for the dataset.
+            Defaults to None.
+            description (str, optional): Dataset description. If not provided, defaults to identifier.
+            frequency (str, optional): The frequency of the dataset. Defaults to "Once".
+            is_internal_only_dataset (bool, optional): Flag for internal datasets. Defaults to False.
+            is_third_party_data (bool, optional): Flag for third party data. Defaults to True.
+            is_restricted (bool | None, optional): Flag for restricted datasets. Defaults to None.
+            is_raw_data (bool, optional): Flag for raw datasets. Defaults to True.
+            maintainer (str | None, optional): Dataset maintainer. Defaults to "J.P. Morgan Fusion".
+            source (str | list[str] | None, optional): Name of data vendor which provided the data. Defaults to None.
+            region (str | list[str] | None, optional): Region. Defaults to None.
+            publisher (str, optional): Name of vendor that publishes the data. Defaults to "J.P. Morgan".
+            product (str | list[str] | None, optional): Product to associate dataset with. Defaults to None.
+            sub_category (str | list[str] | None, optional): Sub-category. Defaults to None.
+            tags (str | list[str] | None, optional): Tags used for search purposes. Defaults to None.
+            created_date (str | None, optional): Created date. Defaults to None.
+            modified_date (str | None, optional): Modified date. Defaults to None.
+            delivery_channel (str | list[str], optional): Delivery channel. Defaults to "API".
+            language (str, optional): Language. Defaults to "English".
+            status (str, optional): Status. Defaults to "Available".
+            type_ (str | None, optional): Dataset type. Defaults to "Source".
+            container_type (str | None, optional): Container type. Defaults to "Snapshot-Full".
+            snowflake (str | None, optional): Snowflake account connection. Defaults to None.
+            complexity (str | None, optional): Complexity. Defaults to None.
+            is_immutable (bool | None, optional): Flag for immutable datasets. Defaults to None.
+            is_mnpi (bool | None, optional): is_mnpi. Defaults to None.
+            is_pci (bool | None, optional): is_pci. Defaults to None.
+            is_pii (bool | None, optional): is_pii. Defaults to None.
+            is_client (bool | None, optional): is_client. Defaults to None.
+            is_public (bool | None, optional): is_public. Defaults to None.
+            is_internal (bool | None, optional): is_internal. Defaults to None.
+            is_confidential (bool | None, optional): is_confidential. Defaults to None.
+            is_highly_confidential (bool | None, optional): is_highly_confidential. Defaults to None.
+            is_active (bool | None, optional): is_active. Defaults to None.
+            owners (list[str] | None, optional): The owners of the dataset. Defaults to None.
+            application_id (str | None, optional): The application ID of the dataset. Defaults to None.
+
+        Returns:
+            Dataset: Fusion Dataset class.
+
+        Examples:
+            >>> from fusion import Fusion
+            >>> fusion = Fusion()
+            >>> dataset = fusion.dataset(identifier="DATASET_1")
+
+        Note:
+            See the dataset module for more information on functionalities of dataset objects.
+
+        """
+        dataset_obj = Dataset(
+            identifier=identifier,
+            title=title,
+            category=category,
+            description=description,
+            frequency=frequency,
+            is_internal_only_dataset=is_internal_only_dataset,
+            is_third_party_data=is_third_party_data,
+            is_restricted=is_restricted,
+            is_raw_data=is_raw_data,
+            maintainer=maintainer,
+            source=source,
+            region=region,
+            publisher=publisher,
+            product=product,
+            sub_category=sub_category,
+            tags=tags,
+            created_date=created_date,
+            modified_date=modified_date,
+            delivery_channel=delivery_channel,
+            language=language,
+            status=status,
+            type_=type_,
+            container_type=container_type,
+            snowflake=snowflake,
+            complexity=complexity,
+            is_immutable=is_immutable,
+            is_mnpi=is_mnpi,
+            is_pci=is_pci,
+            is_pii=is_pii,
+            is_client=is_client,
+            is_public=is_public,
+            is_internal=is_internal,
+            is_confidential=is_confidential,
+            is_highly_confidential=is_highly_confidential,
+            is_active=is_active,
+            owners=owners,
+            application_id=application_id,
+            **kwargs,
+        )
+        dataset_obj.client = self
+        return dataset_obj
+
+    def attribute(  # noqa: PLR0913
+        self,
+        identifier: str,
+        index: int,
+        data_type: str | Types = "String",
+        title: str = "",
+        description: str = "",
+        is_dataset_key: bool = False,
+        source: str | None = None,
+        source_field_id: str | None = None,
+        is_internal_dataset_key: bool | None = None,
+        is_externally_visible: bool | None = True,
+        unit: Any | None = None,
+        multiplier: float = 1.0,
+        is_propagation_eligible: bool | None = None,
+        is_metric: bool | None = None,
+        available_from: str | None = None,
+        deprecated_from: str | None = None,
+        term: str = "bizterm1",
+        dataset: int | None = None,
+        attribute_type: str | None = None,
+        application_id: str | dict[str, str] | None = None,
+        **kwargs: Any,
+    ) -> Attribute:
+        """Instantiate an Attribute object with this client for metadata creation.
+
+        Args:
+            identifier (str): The unique identifier for the attribute.
+            index (int): Attribute index.
+            data_type (str | Types, optional): Datatype of attribute. Defaults to "String".
+            title (str, optional): Attribute title. If not provided, defaults to identifier.
+            description (str, optional): Attribute description. If not provided, defaults to identifier.
+            is_dataset_key (bool, optional): Flag for primary keys. Defaults to False.
+            source (str | None, optional): Name of data vendor which provided the data. Defaults to None.
+            source_field_id (str | None, optional): Original identifier of attribute, if attribute has been renamed.
+                If not provided, defaults to identifier.
+            is_internal_dataset_key (bool | None, optional): Flag for internal primary keys. Defaults to None.
+            is_externally_visible (bool | None, optional): Flag for externally visible attributes. Defaults to True.
+            unit (Any | None, optional): Unit of attribute. Defaults to None.
+            multiplier (float, optional): Multiplier for unit. Defaults to 1.0.
+            is_propagation_eligible (bool | None, optional): Flag for propagation eligibility. Defaults to None.
+            is_metric (bool | None, optional): Flag for attributes that are metrics. Defaults to None.
+            available_from (str | None, optional): Date from which the attribute is available. Defaults to None.
+            deprecated_from (str | None, optional): Date from which the attribute is deprecated. Defaults to None.
+            term (str, optional): Term. Defaults to "bizterm1".
+            dataset (int | None, optional): Dataset. Defaults to None.
+            attribute_type (str | None, optional): Attribute type. Defaults to None.
+
+        Returns:
+            Attribute: Fusion Attribute class.
+
+        Examples:
+            >>> from fusion import Fusion
+            >>> fusion = Fusion()
+            >>> attr = fusion.attribute(identifier="attr1", index=0)
+
+        Note:
+            See the attributes module for more information on functionalities of attribute objects.
+
+        """
+        data_type = Types[str(data_type).strip().rsplit(".", maxsplit=1)[-1].title()]
+        attribute_obj = Attribute(
+            identifier=identifier,
+            index=index,
+            data_type=data_type,
+            title=title,
+            description=description,
+            is_dataset_key=is_dataset_key,
+            source=source,
+            source_field_id=source_field_id,
+            is_internal_dataset_key=is_internal_dataset_key,
+            is_externally_visible=is_externally_visible,
+            unit=unit,
+            multiplier=multiplier,
+            is_propagation_eligible=is_propagation_eligible,
+            is_metric=is_metric,
+            available_from=available_from,
+            deprecated_from=deprecated_from,
+            term=term,
+            dataset=dataset,
+            attribute_type=attribute_type,
+            application_id=application_id,
+            **kwargs,
+        )
+        attribute_obj.client = self
+        return attribute_obj
+
+    def attributes(
+        self,
+        attributes: list[Attribute] | None = None,
+    ) -> Attributes:
+        """Instantiate an Attributes object with this client for metadata creation.
+
+        Args:
+            attributes (list[Attribute] | None, optional): List of Attribute objects. Defaults to None.
+
+        Returns:
+            Attributes: Fusion Attributes class.
+
+        Examples:
+            >>> from fusion import Fusion
+            >>> fusion = Fusion()
+            >>> attr1 = fusion.attribute("attr1", 0)
+            >>> attr2 = fusion.attribute("attr2", 1)
+            >>> attrs = fusion.attributes([attr1, attr2])
+
+        Note:
+            See the attributes module for more information on functionalities of attributes object.
+
+        """
+        attributes_obj = Attributes(attributes=attributes or [])
+        attributes_obj.client = self
+        return attributes_obj
+
+    def delete_datasetmembers(
+        self,
+        dataset: str,
+        series_members: str | list[str],
+        catalog: str | None = None,
+        return_resp_obj: bool = False,
+    ) -> list[requests.Response] | None:
+        """Delete dataset members.
+
+        Args:
+            dataset (str): A dataset identifier
+            series_members (str | list[str]): A string or list of strings that are dataset series member
+            identifiers to delete.
+            catalog (str | None, optional): A catalog identifier. Defaults to 'common'.
+            return_resp_obj (bool, optional): If True then return the response object. Defaults to False.
+
+        Returns:
+            list[requests.Response]: a list of response objects.
+
+        Examples:
+            Delete one dataset member.
+
+            >>> from fusion import Fusion
+            >>> fusion = Fusion()
+            >>> fusion.delete_datasetmembers(dataset="dataset1", series_members="series1")
+
+            Delete multiple dataset members.
+
+            >>> from fusion import Fusion
+            >>> fusion = Fusion()
+            >>> fusion.delete_datasetmembers(dataset="dataset1", series_members=["series1", "series2"])
+
+        """
+        catalog = self._use_catalog(catalog)
+        if isinstance(series_members, str):
+            series_members = [series_members]
+        responses = []
+        for series_member in series_members:
+            url = f"{self.root_url}catalogs/{catalog}/datasets/{dataset}/datasetseries/{series_member}"
+            resp = self.session.delete(url)
+            requests_raise_for_status(resp)
+            responses.append(resp)
+        return responses if return_resp_obj else None
+
+    def delete_all_datasetmembers(
+        self,
+        dataset: str,
+        catalog: str | None = None,
+        return_resp_obj: bool = False,
+    ) -> requests.Response | None:
+        """Delete all dataset members within a dataset.
+
+        Args:
+            dataset (str): A dataset identifier
+            catalog (str | None, optional): A catalog identifier. Defaults to 'common'.
+            return_resp_obj (bool, optional): If True then return the response object. Defaults to False.
+
+        Returns:
+            list[requests.Response]: a list of response objects.
+
+        Examples:
+            >>> from fusion import Fusion
+            >>> fusion = Fusion()
+            >>> fusion.delete_all_datasetmembers(dataset="dataset1")
+
+        """
+        catalog = self._use_catalog(catalog)
+        url = f"{self.root_url}catalogs/{catalog}/datasets/{dataset}/datasetseries"
+        resp = self.session.delete(url)
+        requests_raise_for_status(resp)
+        return resp if return_resp_obj else None
+
+    def list_registered_attributes(
+        self,
+        catalog: str | None = None,
+        output: bool = False,
+        display_all_columns: bool = False,
+    ) -> pd.DataFrame:
+        """Returns the list of attributes in a catalog.
+
+        Args:
+            catalog (str, optional): A catalog identifier. Defaults to 'common'.
+            output (bool, optional): If True then print the dataframe. Defaults to False.
+            display_all_columns (bool, optional): If True displays all columns returned by the API,
+                otherwise only the key columns are displayed
+
+        Returns:
+            class:`pandas.DataFrame`: A dataframe with a row for each attribute
+        """
+        catalog = self._use_catalog(catalog)
+
+        url = f"{self.root_url}catalogs/{catalog}/attributes"
+        ds_attr_df = Fusion._call_for_dataframe(url, self.session).reset_index(drop=True)
+
+        if not display_all_columns:
+            ds_attr_df = ds_attr_df[
+                ds_attr_df.columns.intersection(
+                    [
+                        "identifier",
+                        "title",
+                        "dataType",
+                        "description",
+                        "publisher",
+                        "applicationId",
+                    ]
+                )
+            ]
+
+        if output:
+            pass
+
+        return ds_attr_df
+
+    def report(  # noqa: PLR0913
+        self,
+        identifier: str,
+        title: str = "",
+        category: str | list[str] | None = None,
+        description: str = "",
+        frequency: str = "Once",
+        is_internal_only_dataset: bool = False,
+        is_third_party_data: bool = True,
+        is_restricted: bool | None = None,
+        is_raw_data: bool = True,
+        maintainer: str | None = "J.P. Morgan Fusion",
+        source: str | list[str] | None = None,
+        region: str | list[str] | None = None,
+        publisher: str = "J.P. Morgan",
+        product: str | list[str] | None = None,
+        sub_category: str | list[str] | None = None,
+        tags: str | list[str] | None = None,
+        created_date: str | None = None,
+        modified_date: str | None = None,
+        delivery_channel: str | list[str] = "API",
+        language: str = "English",
+        status: str = "Available",
+        type_: str | None = "Report",
+        container_type: str | None = "Snapshot-Full",
+        snowflake: str | None = None,
+        complexity: str | None = None,
+        is_immutable: bool | None = None,
+        is_mnpi: bool | None = None,
+        is_pci: bool | None = None,
+        is_pii: bool | None = None,
+        is_client: bool | None = None,
+        is_public: bool | None = None,
+        is_internal: bool | None = None,
+        is_confidential: bool | None = None,
+        is_highly_confidential: bool | None = None,
+        is_active: bool | None = None,
+        owners: list[str] | None = None,
+        application_id: str | dict[str, str] | None = None,
+        report: dict[str, str] | None = None,
+        **kwargs: Any,
+    ) -> Report:
+        """Instantiate Report object with this client for metadata creation for managing regulatory reporting metadata.
+
+        Args:
+            identifier (str): Dataset identifier.
+            title (str, optional): Dataset title. If not provided, defaults to identifier.
+            category (str | list[str] | None, optional): A category or list of categories for the dataset.
+            Defaults to None.
+            description (str, optional): Dataset description. If not provided, defaults to identifier.
+            frequency (str, optional): The frequency of the dataset. Defaults to "Once".
+            is_internal_only_dataset (bool, optional): Flag for internal datasets. Defaults to False.
+            is_third_party_data (bool, optional): Flag for third party data. Defaults to True.
+            is_restricted (bool | None, optional): Flag for restricted datasets. Defaults to None.
+            is_raw_data (bool, optional): Flag for raw datasets. Defaults to True.
+            maintainer (str | None, optional): Dataset maintainer. Defaults to "J.P. Morgan Fusion".
+            source (str | list[str] | None, optional): Name of data vendor which provided the data. Defaults to None.
+            region (str | list[str] | None, optional): Region. Defaults to None.
+            publisher (str, optional): Name of vendor that publishes the data. Defaults to "J.P. Morgan".
+            product (str | list[str] | None, optional): Product to associate dataset with. Defaults to None.
+            sub_category (str | list[str] | None, optional): Sub-category. Defaults to None.
+            tags (str | list[str] | None, optional): Tags used for search purposes. Defaults to None.
+            created_date (str | None, optional): Created date. Defaults to None.
+            modified_date (str | None, optional): Modified date. Defaults to None.
+            delivery_channel (str | list[str], optional): Delivery channel. Defaults to "API".
+            language (str, optional): Language. Defaults to "English".
+            status (str, optional): Status. Defaults to "Available".
+            type_ (str | None, optional): Dataset type. Defaults to "Source".
+            container_type (str | None, optional): Container type. Defaults to "Snapshot-Full".
+            snowflake (str | None, optional): Snowflake account connection. Defaults to None.
+            complexity (str | None, optional): Complexity. Defaults to None.
+            is_immutable (bool | None, optional): Flag for immutable datasets. Defaults to None.
+            is_mnpi (bool | None, optional): is_mnpi. Defaults to None.
+            is_pci (bool | None, optional): is_pci. Defaults to None.
+            is_pii (bool | None, optional): is_pii. Defaults to None.
+            is_client (bool | None, optional): is_client. Defaults to None.
+            is_public (bool | None, optional): is_public. Defaults to None.
+            is_internal (bool | None, optional): is_internal. Defaults to None.
+            is_confidential (bool | None, optional): is_confidential. Defaults to None.
+            is_highly_confidential (bool | None, optional): is_highly_confidential. Defaults to None.
+            is_active (bool | None, optional): is_active. Defaults to None.
+            owners (list[str] | None, optional): The owners of the dataset. Defaults to None.
+            application_id (str | None, optional): The application ID of the dataset. Defaults to None.
+            report (dict[str, str] | None, optional): The report metadata. Specifies the tier of the report.
+                Required for registered reports to the catalog.
+
+        Returns:
+            Dataset: Fusion Dataset class.
+
+        Examples:
+            >>> from fusion import Fusion
+            >>> fusion = Fusion()
+            >>> dataset = fusion.report(identifier="DATASET_1")
+
+        Note:
+            See the dataset module for more information on functionalities of report objects.
+
+        """
+        report_obj = Report(
+            identifier=identifier,
+            title=title,
+            category=category,
+            description=description,
+            frequency=frequency,
+            is_internal_only_dataset=is_internal_only_dataset,
+            is_third_party_data=is_third_party_data,
+            is_restricted=is_restricted,
+            is_raw_data=is_raw_data,
+            maintainer=maintainer,
+            source=source,
+            region=region,
+            publisher=publisher,
+            product=product,
+            sub_category=sub_category,
+            tags=tags,
+            created_date=created_date,
+            modified_date=modified_date,
+            delivery_channel=delivery_channel,
+            language=language,
+            status=status,
+            type_=type_,
+            container_type=container_type,
+            snowflake=snowflake,
+            complexity=complexity,
+            is_immutable=is_immutable,
+            is_mnpi=is_mnpi,
+            is_pci=is_pci,
+            is_pii=is_pii,
+            is_client=is_client,
+            is_public=is_public,
+            is_internal=is_internal,
+            is_confidential=is_confidential,
+            is_highly_confidential=is_highly_confidential,
+            is_active=is_active,
+            owners=owners,
+            application_id=application_id,
+            report=report,
+            **kwargs,
+        )
+        report_obj.client = self
+        return report_obj
+
+    def input_dataflow(  # noqa: PLR0913
+        self,
+        identifier: str,
+        title: str = "",
+        category: str | list[str] | None = None,
+        description: str = "",
+        frequency: str = "Once",
+        is_internal_only_dataset: bool = False,
+        is_third_party_data: bool = True,
+        is_restricted: bool | None = None,
+        is_raw_data: bool = True,
+        maintainer: str | None = "J.P. Morgan Fusion",
+        source: str | list[str] | None = None,
+        region: str | list[str] | None = None,
+        publisher: str = "J.P. Morgan",
+        product: str | list[str] | None = None,
+        sub_category: str | list[str] | None = None,
+        tags: str | list[str] | None = None,
+        created_date: str | None = None,
+        modified_date: str | None = None,
+        delivery_channel: str | list[str] = "API",
+        language: str = "English",
+        status: str = "Available",
+        type_: str | None = "Flow",
+        container_type: str | None = "Snapshot-Full",
+        snowflake: str | None = None,
+        complexity: str | None = None,
+        is_immutable: bool | None = None,
+        is_mnpi: bool | None = None,
+        is_pci: bool | None = None,
+        is_pii: bool | None = None,
+        is_client: bool | None = None,
+        is_public: bool | None = None,
+        is_internal: bool | None = None,
+        is_confidential: bool | None = None,
+        is_highly_confidential: bool | None = None,
+        is_active: bool | None = None,
+        owners: list[str] | None = None,
+        application_id: str | dict[str, str] | None = None,
+        producer_application_id: dict[str, str] | None = None,
+        consumer_application_id: list[dict[str, str]] | dict[str, str] | None = None,
+        flow_details: dict[str, str] | None = None,
+        **kwargs: Any,
+    ) -> InputDataFlow:
+        """Instantiate an Input Dataflow object with this client for metadata creation.
+
+        Args:
+            identifier (str): Dataset identifier.
+            title (str, optional): Dataset title. If not provided, defaults to identifier.
+            category (str | list[str] | None, optional): A category or list of categories for the dataset.
+            Defaults to None.
+            description (str, optional): Dataset description. If not provided, defaults to identifier.
+            frequency (str, optional): The frequency of the dataset. Defaults to "Once".
+            is_internal_only_dataset (bool, optional): Flag for internal datasets. Defaults to False.
+            is_third_party_data (bool, optional): Flag for third party data. Defaults to True.
+            is_restricted (bool | None, optional): Flag for restricted datasets. Defaults to None.
+            is_raw_data (bool, optional): Flag for raw datasets. Defaults to True.
+            maintainer (str | None, optional): Dataset maintainer. Defaults to "J.P. Morgan Fusion".
+            source (str | list[str] | None, optional): Name of data vendor which provided the data. Defaults to None.
+            region (str | list[str] | None, optional): Region. Defaults to None.
+            publisher (str, optional): Name of vendor that publishes the data. Defaults to "J.P. Morgan".
+            product (str | list[str] | None, optional): Product to associate dataset with. Defaults to None.
+            sub_category (str | list[str] | None, optional): Sub-category. Defaults to None.
+            tags (str | list[str] | None, optional): Tags used for search purposes. Defaults to None.
+            created_date (str | None, optional): Created date. Defaults to None.
+            modified_date (str | None, optional): Modified date. Defaults to None.
+            delivery_channel (str | list[str], optional): Delivery channel. Defaults to "API".
+            language (str, optional): Language. Defaults to "English".
+            status (str, optional): Status. Defaults to "Available".
+            type_ (str | None, optional): Dataset type. Defaults to "Flow".
+            container_type (str | None, optional): Container type. Defaults to "Snapshot-Full".
+            snowflake (str | None, optional): Snowflake account connection. Defaults to None.
+            complexity (str | None, optional): Complexity. Defaults to None.
+            is_immutable (bool | None, optional): Flag for immutable datasets. Defaults to None.
+            is_mnpi (bool | None, optional): is_mnpi. Defaults to None.
+            is_pci (bool | None, optional): is_pci. Defaults to None.
+            is_pii (bool | None, optional): is_pii. Defaults to None.
+            is_client (bool | None, optional): is_client. Defaults to None.
+            is_public (bool | None, optional): is_public. Defaults to None.
+            is_internal (bool | None, optional): is_internal. Defaults to None.
+            is_confidential (bool | None, optional): is_confidential. Defaults to None.
+            is_highly_confidential (bool | None, optional): is_highly_confidential. Defaults to None.
+            is_active (bool | None, optional): is_active. Defaults to None.
+            owners (list[str] | None, optional): The owners of the dataset. Defaults to None.
+            application_id (str | None, optional): The application ID of the dataset. Defaults to None.
+            producer_application_id (dict[str, str] | None, optional): The producer application ID (upstream application
+                producing the flow).
+            consumer_application_id (list[dict[str, str]] | dict[str, str] | None, optional): The consumer application 
+                ID (downstream application, consuming the flow).
+            flow_details (dict[str, str] | None, optional): The flow details. Specifies input versus output flow.
+                Defaults to {"flowDirection": "Input"}.
+
+        Returns:
+            Dataset: Fusion InputDataFlow class.
+
+        Examples:
+            >>> from fusion import Fusion
+            >>> fusion = Fusion()
+            >>> dataset = fusion.input_dataflow(identifier="MY_DATAFLOW")
+
+        Note:
+            See the dataset module for more information on functionalities of input dataflow objects.
+
+        """
+        flow_details = {"flowDirection": "Input"} if flow_details is None else flow_details
+        dataflow_obj = InputDataFlow(
+            identifier=identifier,
+            title=title,
+            category=category,
+            description=description,
+            frequency=frequency,
+            is_internal_only_dataset=is_internal_only_dataset,
+            is_third_party_data=is_third_party_data,
+            is_restricted=is_restricted,
+            is_raw_data=is_raw_data,
+            maintainer=maintainer,
+            source=source,
+            region=region,
+            publisher=publisher,
+            product=product,
+            sub_category=sub_category,
+            tags=tags,
+            created_date=created_date,
+            modified_date=modified_date,
+            delivery_channel=delivery_channel,
+            language=language,
+            status=status,
+            type_=type_,
+            container_type=container_type,
+            snowflake=snowflake,
+            complexity=complexity,
+            is_immutable=is_immutable,
+            is_mnpi=is_mnpi,
+            is_pci=is_pci,
+            is_pii=is_pii,
+            is_client=is_client,
+            is_public=is_public,
+            is_internal=is_internal,
+            is_confidential=is_confidential,
+            is_highly_confidential=is_highly_confidential,
+            is_active=is_active,
+            owners=owners,
+            application_id=application_id,
+            producer_application_id=producer_application_id,
+            consumer_application_id=consumer_application_id,
+            flow_details=flow_details,
+            **kwargs,
+        )
+        dataflow_obj.client = self
+        return dataflow_obj
+
+    def output_dataflow(  # noqa: PLR0913
+        self,
+        identifier: str,
+        title: str = "",
+        category: str | list[str] | None = None,
+        description: str = "",
+        frequency: str = "Once",
+        is_internal_only_dataset: bool = False,
+        is_third_party_data: bool = True,
+        is_restricted: bool | None = None,
+        is_raw_data: bool = True,
+        maintainer: str | None = "J.P. Morgan Fusion",
+        source: str | list[str] | None = None,
+        region: str | list[str] | None = None,
+        publisher: str = "J.P. Morgan",
+        product: str | list[str] | None = None,
+        sub_category: str | list[str] | None = None,
+        tags: str | list[str] | None = None,
+        created_date: str | None = None,
+        modified_date: str | None = None,
+        delivery_channel: str | list[str] = "API",
+        language: str = "English",
+        status: str = "Available",
+        type_: str | None = "Flow",
+        container_type: str | None = "Snapshot-Full",
+        snowflake: str | None = None,
+        complexity: str | None = None,
+        is_immutable: bool | None = None,
+        is_mnpi: bool | None = None,
+        is_pci: bool | None = None,
+        is_pii: bool | None = None,
+        is_client: bool | None = None,
+        is_public: bool | None = None,
+        is_internal: bool | None = None,
+        is_confidential: bool | None = None,
+        is_highly_confidential: bool | None = None,
+        is_active: bool | None = None,
+        owners: list[str] | None = None,
+        application_id: str | dict[str, str] | None = None,
+        producer_application_id: dict[str, str] | None = None,
+        consumer_application_id: list[dict[str, str]] | dict[str, str] | None = None,
+        flow_details: dict[str, str] | None = None,
+        **kwargs: Any,
+    ) -> OutputDataFlow:
+        """Instantiate an Output Dataflow object with this client for metadata creation.
+
+        Args:
+            identifier (str): Dataset identifier.
+            title (str, optional): Dataset title. If not provided, defaults to identifier.
+            category (str | list[str] | None, optional): A category or list of categories for the dataset.
+            Defaults to None.
+            description (str, optional): Dataset description. If not provided, defaults to identifier.
+            frequency (str, optional): The frequency of the dataset. Defaults to "Once".
+            is_internal_only_dataset (bool, optional): Flag for internal datasets. Defaults to False.
+            is_third_party_data (bool, optional): Flag for third party data. Defaults to True.
+            is_restricted (bool | None, optional): Flag for restricted datasets. Defaults to None.
+            is_raw_data (bool, optional): Flag for raw datasets. Defaults to True.
+            maintainer (str | None, optional): Dataset maintainer. Defaults to "J.P. Morgan Fusion".
+            source (str | list[str] | None, optional): Name of data vendor which provided the data. Defaults to None.
+            region (str | list[str] | None, optional): Region. Defaults to None.
+            publisher (str, optional): Name of vendor that publishes the data. Defaults to "J.P. Morgan".
+            product (str | list[str] | None, optional): Product to associate dataset with. Defaults to None.
+            sub_category (str | list[str] | None, optional): Sub-category. Defaults to None.
+            tags (str | list[str] | None, optional): Tags used for search purposes. Defaults to None.
+            created_date (str | None, optional): Created date. Defaults to None.
+            modified_date (str | None, optional): Modified date. Defaults to None.
+            delivery_channel (str | list[str], optional): Delivery channel. Defaults to "API".
+            language (str, optional): Language. Defaults to "English".
+            status (str, optional): Status. Defaults to "Available".
+            type_ (str | None, optional): Dataset type. Defaults to "Flow".
+            container_type (str | None, optional): Container type. Defaults to "Snapshot-Full".
+            snowflake (str | None, optional): Snowflake account connection. Defaults to None.
+            complexity (str | None, optional): Complexity. Defaults to None.
+            is_immutable (bool | None, optional): Flag for immutable datasets. Defaults to None.
+            is_mnpi (bool | None, optional): is_mnpi. Defaults to None.
+            is_pci (bool | None, optional): is_pci. Defaults to None.
+            is_pii (bool | None, optional): is_pii. Defaults to None.
+            is_client (bool | None, optional): is_client. Defaults to None.
+            is_public (bool | None, optional): is_public. Defaults to None.
+            is_internal (bool | None, optional): is_internal. Defaults to None.
+            is_confidential (bool | None, optional): is_confidential. Defaults to None.
+            is_highly_confidential (bool | None, optional): is_highly_confidential. Defaults to None.
+            is_active (bool | None, optional): is_active. Defaults to None.
+            owners (list[str] | None, optional): The owners of the dataset. Defaults to None.
+            application_id (str | None, optional): The application ID of the dataset. Defaults to None.
+            producer_application_id (dict[str, str] | None, optional): The producer application ID (upstream application
+                producing the flow).
+            consumer_application_id (list[dict[str, str]] | dict[str, str] | None, optional): The consumer application 
+                ID (downstream application, consuming the flow).
+            flow_details (dict[str, str] | None, optional): The flow details. Specifies input versus output flow.
+                Defaults to {"flowDirection": "Output"}.
+
+        Returns:
+            Dataset: Fusion OutputDataFlow class.
+
+        Examples:
+            >>> from fusion import Fusion
+            >>> fusion = Fusion()
+            >>> dataset = fusion.output_dataflow(identifier="MY_DATAFLOW")
+
+        Note:
+            See the dataset module for more information on functionalities of output dataflow objects.
+
+        """
+        flow_details = {"flowDirection": "Output"} if flow_details is None else flow_details
+        dataflow_obj = OutputDataFlow(
+            identifier=identifier,
+            title=title,
+            category=category,
+            description=description,
+            frequency=frequency,
+            is_internal_only_dataset=is_internal_only_dataset,
+            is_third_party_data=is_third_party_data,
+            is_restricted=is_restricted,
+            is_raw_data=is_raw_data,
+            maintainer=maintainer,
+            source=source,
+            region=region,
+            publisher=publisher,
+            product=product,
+            sub_category=sub_category,
+            tags=tags,
+            created_date=created_date,
+            modified_date=modified_date,
+            delivery_channel=delivery_channel,
+            language=language,
+            status=status,
+            type_=type_,
+            container_type=container_type,
+            snowflake=snowflake,
+            complexity=complexity,
+            is_immutable=is_immutable,
+            is_mnpi=is_mnpi,
+            is_pci=is_pci,
+            is_pii=is_pii,
+            is_client=is_client,
+            is_public=is_public,
+            is_internal=is_internal,
+            is_confidential=is_confidential,
+            is_highly_confidential=is_highly_confidential,
+            is_active=is_active,
+            owners=owners,
+            application_id=application_id,
+            producer_application_id=producer_application_id,
+            consumer_application_id=consumer_application_id,
+            flow_details=flow_details,
+            **kwargs,
+        )
+        dataflow_obj.client = self
+        return dataflow_obj
+
+
+ + + +
+ + + + + + + +
+ + + +

+ default_catalog: str + + + property + writable + + +

+ + +
+ +

Returns the default catalog.

+ + +

Returns:

+ + + + + + + + + + + + + +
TypeDescription
+ str + +
+

None

+
+
+
+ +
+ + + +
+ + +

+ __init__(credentials='config/client_credentials.json', root_url='https://fusion.jpmorgan.com/api/v1/', download_folder='downloads', log_level=logging.ERROR, fs=None, log_path='.') + +

+ + +
+ +

Constructor to instantiate a new Fusion object.

+ + +

Parameters:

+ + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + +
NameTypeDescriptionDefault
credentials + Union[str, FusionCredentials] + +
+

A path to a credentials file or a fully populated

+
+
+ 'config/client_credentials.json' +
root_url + _type_ + +
+

The API root URL. +Defaults to "https://fusion.jpmorgan.com/api/v1/".

+
+
+ 'https://fusion.jpmorgan.com/api/v1/' +
download_folder + str + +
+

The folder path where downloaded data files +are saved. Defaults to "downloads".

+
+
+ 'downloads' +
log_level + int + +
+

Set the logging level. Defaults to logging.ERROR.

+
+
+ ERROR +
fs + filesystem + +
+

filesystem.

+
+
+ None +
log_path + str + +
+

The folder path where the log is stored.

+
+
+ '.' +
+ +
+ Source code in py_src/fusion/fusion.py +
101
+102
+103
+104
+105
+106
+107
+108
+109
+110
+111
+112
+113
+114
+115
+116
+117
+118
+119
+120
+121
+122
+123
+124
+125
+126
+127
+128
+129
+130
+131
+132
+133
+134
+135
+136
+137
+138
+139
+140
+141
+142
+143
+144
+145
+146
+147
+148
+149
+150
+151
+152
def __init__(
+    self,
+    credentials: str | FusionCredentials = "config/client_credentials.json",
+    root_url: str = "https://fusion.jpmorgan.com/api/v1/",
+    download_folder: str = "downloads",
+    log_level: int = logging.ERROR,
+    fs: fsspec.filesystem = None,
+    log_path: str = ".",
+) -> None:
+    """Constructor to instantiate a new Fusion object.
+
+    Args:
+        credentials (Union[str, FusionCredentials]): A path to a credentials file or a fully populated
+        FusionCredentials object. Defaults to 'config/client_credentials.json'.
+        root_url (_type_, optional): The API root URL.
+            Defaults to "https://fusion.jpmorgan.com/api/v1/".
+        download_folder (str, optional): The folder path where downloaded data files
+            are saved. Defaults to "downloads".
+        log_level (int, optional): Set the logging level. Defaults to logging.ERROR.
+        fs (fsspec.filesystem): filesystem.
+        log_path (str, optional): The folder path where the log is stored.
+    """
+    self._default_catalog = "common"
+
+    self.root_url = root_url
+    self.download_folder = download_folder
+    Path(download_folder).mkdir(parents=True, exist_ok=True)
+
+    if logger.hasHandlers():
+        logger.handlers.clear()
+    file_handler = logging.FileHandler(filename=f"{log_path}/fusion_sdk.log")
+    logging.addLevelName(VERBOSE_LVL, "VERBOSE")
+    stdout_handler = logging.StreamHandler(sys.stdout)
+    formatter = logging.Formatter(
+        "%(asctime)s.%(msecs)03d %(name)s:%(levelname)s %(message)s",
+        datefmt="%Y-%m-%d %H:%M:%S",
+    )
+    stdout_handler.setFormatter(formatter)
+    logger.addHandler(stdout_handler)
+    logger.addHandler(file_handler)
+    logger.setLevel(log_level)
+
+    if isinstance(credentials, FusionCredentials):
+        self.credentials = credentials
+    elif isinstance(credentials, str):
+        self.credentials = FusionCredentials.from_file(Path(credentials))
+    else:
+        raise ValueError("credentials must be a path to a credentials file or FusionCredentials object")
+
+    self.session = get_session(self.credentials, self.root_url)
+    self.fs = fs if fs else get_default_fs()
+    self.events: pd.DataFrame | None = None
+
+
+
+ +
+ +
+ + +

+ __repr__() + +

+ + +
+ +

Object representation to list all available methods.

+ +
+ Source code in py_src/fusion/fusion.py +
154
+155
+156
+157
+158
+159
+160
+161
+162
+163
+164
+165
+166
+167
+168
+169
+170
+171
+172
+173
+174
+175
+176
+177
+178
def __repr__(self) -> str:
+    """Object representation to list all available methods."""
+    return "Fusion object \nAvailable methods:\n" + tabulate(
+        pd.DataFrame(  # type: ignore
+            [
+                [
+                    method_name
+                    for method_name in dir(Fusion)
+                    if callable(getattr(Fusion, method_name)) and not method_name.startswith("_")
+                ]
+                + [p for p in dir(Fusion) if isinstance(getattr(Fusion, p), property)],
+                [
+                    getattr(Fusion, method_name).__doc__.split("\n")[0]
+                    for method_name in dir(Fusion)
+                    if callable(getattr(Fusion, method_name)) and not method_name.startswith("_")
+                ]
+                + [
+                    getattr(Fusion, p).__doc__.split("\n")[0]
+                    for p in dir(Fusion)
+                    if isinstance(getattr(Fusion, p), property)
+                ],
+            ]
+        ).T.set_index(0),
+        tablefmt="psql",
+    )
+
+
+
+ +
+ +
+ + +

+ attribute(identifier, index, data_type='String', title='', description='', is_dataset_key=False, source=None, source_field_id=None, is_internal_dataset_key=None, is_externally_visible=True, unit=None, multiplier=1.0, is_propagation_eligible=None, is_metric=None, available_from=None, deprecated_from=None, term='bizterm1', dataset=None, attribute_type=None, application_id=None, **kwargs) + +

+ + +
+ +

Instantiate an Attribute object with this client for metadata creation.

+ + +

Parameters:

+ + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + +
NameTypeDescriptionDefault
identifier + str + +
+

The unique identifier for the attribute.

+
+
+ required +
index + int + +
+

Attribute index.

+
+
+ required +
data_type + str | Types + +
+

Datatype of attribute. Defaults to "String".

+
+
+ 'String' +
title + str + +
+

Attribute title. If not provided, defaults to identifier.

+
+
+ '' +
description + str + +
+

Attribute description. If not provided, defaults to identifier.

+
+
+ '' +
is_dataset_key + bool + +
+

Flag for primary keys. Defaults to False.

+
+
+ False +
source + str | None + +
+

Name of data vendor which provided the data. Defaults to None.

+
+
+ None +
source_field_id + str | None + +
+

Original identifier of attribute, if attribute has been renamed. +If not provided, defaults to identifier.

+
+
+ None +
is_internal_dataset_key + bool | None + +
+

Flag for internal primary keys. Defaults to None.

+
+
+ None +
is_externally_visible + bool | None + +
+

Flag for externally visible attributes. Defaults to True.

+
+
+ True +
unit + Any | None + +
+

Unit of attribute. Defaults to None.

+
+
+ None +
multiplier + float + +
+

Multiplier for unit. Defaults to 1.0.

+
+
+ 1.0 +
is_propagation_eligible + bool | None + +
+

Flag for propagation eligibility. Defaults to None.

+
+
+ None +
is_metric + bool | None + +
+

Flag for attributes that are metrics. Defaults to None.

+
+
+ None +
available_from + str | None + +
+

Date from which the attribute is available. Defaults to None.

+
+
+ None +
deprecated_from + str | None + +
+

Date from which the attribute is deprecated. Defaults to None.

+
+
+ None +
term + str + +
+

Term. Defaults to "bizterm1".

+
+
+ 'bizterm1' +
dataset + int | None + +
+

Dataset. Defaults to None.

+
+
+ None +
attribute_type + str | None + +
+

Attribute type. Defaults to None.

+
+
+ None +
+ + +

Returns:

+ + + + + + + + + + + + + +
Name TypeDescription
Attribute + Attribute + +
+

Fusion Attribute class.

+
+
+ + +

Examples:

+
>>> from fusion import Fusion
+>>> fusion = Fusion()
+>>> attr = fusion.attribute(identifier="attr1", index=0)
+
+ + +
+ Note +

See the attributes module for more information on functionalities of attribute objects.

+
+
+ Source code in py_src/fusion/fusion.py +
1768
+1769
+1770
+1771
+1772
+1773
+1774
+1775
+1776
+1777
+1778
+1779
+1780
+1781
+1782
+1783
+1784
+1785
+1786
+1787
+1788
+1789
+1790
+1791
+1792
+1793
+1794
+1795
+1796
+1797
+1798
+1799
+1800
+1801
+1802
+1803
+1804
+1805
+1806
+1807
+1808
+1809
+1810
+1811
+1812
+1813
+1814
+1815
+1816
+1817
+1818
+1819
+1820
+1821
+1822
+1823
+1824
+1825
+1826
+1827
+1828
+1829
+1830
+1831
+1832
+1833
+1834
+1835
+1836
+1837
+1838
+1839
+1840
+1841
+1842
+1843
+1844
+1845
+1846
+1847
+1848
+1849
+1850
+1851
+1852
+1853
def attribute(  # noqa: PLR0913
+    self,
+    identifier: str,
+    index: int,
+    data_type: str | Types = "String",
+    title: str = "",
+    description: str = "",
+    is_dataset_key: bool = False,
+    source: str | None = None,
+    source_field_id: str | None = None,
+    is_internal_dataset_key: bool | None = None,
+    is_externally_visible: bool | None = True,
+    unit: Any | None = None,
+    multiplier: float = 1.0,
+    is_propagation_eligible: bool | None = None,
+    is_metric: bool | None = None,
+    available_from: str | None = None,
+    deprecated_from: str | None = None,
+    term: str = "bizterm1",
+    dataset: int | None = None,
+    attribute_type: str | None = None,
+    application_id: str | dict[str, str] | None = None,
+    **kwargs: Any,
+) -> Attribute:
+    """Instantiate an Attribute object with this client for metadata creation.
+
+    Args:
+        identifier (str): The unique identifier for the attribute.
+        index (int): Attribute index.
+        data_type (str | Types, optional): Datatype of attribute. Defaults to "String".
+        title (str, optional): Attribute title. If not provided, defaults to identifier.
+        description (str, optional): Attribute description. If not provided, defaults to identifier.
+        is_dataset_key (bool, optional): Flag for primary keys. Defaults to False.
+        source (str | None, optional): Name of data vendor which provided the data. Defaults to None.
+        source_field_id (str | None, optional): Original identifier of attribute, if attribute has been renamed.
+            If not provided, defaults to identifier.
+        is_internal_dataset_key (bool | None, optional): Flag for internal primary keys. Defaults to None.
+        is_externally_visible (bool | None, optional): Flag for externally visible attributes. Defaults to True.
+        unit (Any | None, optional): Unit of attribute. Defaults to None.
+        multiplier (float, optional): Multiplier for unit. Defaults to 1.0.
+        is_propagation_eligible (bool | None, optional): Flag for propagation eligibility. Defaults to None.
+        is_metric (bool | None, optional): Flag for attributes that are metrics. Defaults to None.
+        available_from (str | None, optional): Date from which the attribute is available. Defaults to None.
+        deprecated_from (str | None, optional): Date from which the attribute is deprecated. Defaults to None.
+        term (str, optional): Term. Defaults to "bizterm1".
+        dataset (int | None, optional): Dataset. Defaults to None.
+        attribute_type (str | None, optional): Attribute type. Defaults to None.
+
+    Returns:
+        Attribute: Fusion Attribute class.
+
+    Examples:
+        >>> from fusion import Fusion
+        >>> fusion = Fusion()
+        >>> attr = fusion.attribute(identifier="attr1", index=0)
+
+    Note:
+        See the attributes module for more information on functionalities of attribute objects.
+
+    """
+    data_type = Types[str(data_type).strip().rsplit(".", maxsplit=1)[-1].title()]
+    attribute_obj = Attribute(
+        identifier=identifier,
+        index=index,
+        data_type=data_type,
+        title=title,
+        description=description,
+        is_dataset_key=is_dataset_key,
+        source=source,
+        source_field_id=source_field_id,
+        is_internal_dataset_key=is_internal_dataset_key,
+        is_externally_visible=is_externally_visible,
+        unit=unit,
+        multiplier=multiplier,
+        is_propagation_eligible=is_propagation_eligible,
+        is_metric=is_metric,
+        available_from=available_from,
+        deprecated_from=deprecated_from,
+        term=term,
+        dataset=dataset,
+        attribute_type=attribute_type,
+        application_id=application_id,
+        **kwargs,
+    )
+    attribute_obj.client = self
+    return attribute_obj
+
+
+
+ +
+ +
+ + +

+ attributes(attributes=None) + +

+ + +
+ +

Instantiate an Attributes object with this client for metadata creation.

+ + +

Parameters:

+ + + + + + + + + + + + + + + + + +
NameTypeDescriptionDefault
attributes + list[Attribute] | None + +
+

List of Attribute objects. Defaults to None.

+
+
+ None +
+ + +

Returns:

+ + + + + + + + + + + + + +
Name TypeDescription
Attributes + Attributes + +
+

Fusion Attributes class.

+
+
+ + +

Examples:

+
>>> from fusion import Fusion
+>>> fusion = Fusion()
+>>> attr1 = fusion.attribute("attr1", 0)
+>>> attr2 = fusion.attribute("attr2", 1)
+>>> attrs = fusion.attributes([attr1, attr2])
+
+ + +
+ Note +

See the attributes module for more information on functionalities of attributes object.

+
+
+ Source code in py_src/fusion/fusion.py +
1855
+1856
+1857
+1858
+1859
+1860
+1861
+1862
+1863
+1864
+1865
+1866
+1867
+1868
+1869
+1870
+1871
+1872
+1873
+1874
+1875
+1876
+1877
+1878
+1879
+1880
def attributes(
+    self,
+    attributes: list[Attribute] | None = None,
+) -> Attributes:
+    """Instantiate an Attributes object with this client for metadata creation.
+
+    Args:
+        attributes (list[Attribute] | None, optional): List of Attribute objects. Defaults to None.
+
+    Returns:
+        Attributes: Fusion Attributes class.
+
+    Examples:
+        >>> from fusion import Fusion
+        >>> fusion = Fusion()
+        >>> attr1 = fusion.attribute("attr1", 0)
+        >>> attr2 = fusion.attribute("attr2", 1)
+        >>> attrs = fusion.attributes([attr1, attr2])
+
+    Note:
+        See the attributes module for more information on functionalities of attributes object.
+
+    """
+    attributes_obj = Attributes(attributes=attributes or [])
+    attributes_obj.client = self
+    return attributes_obj
+
+
+
+ +
+ +
+ + +

+ catalog_resources(catalog=None, output=False) + +

+ + +
+ +

List the resources contained within the catalog, for example products and datasets.

+ + +

Parameters:

+ + + + + + + + + + + + + + + + + + + + + + + +
NameTypeDescriptionDefault
catalog + str + +
+

A catalog identifier. Defaults to 'common'.

+
+
+ None +
output + bool + +
+

If True then print the dataframe. Defaults to False.

+
+
+ False +
+ + +

Returns:

+ + + + + + + + + + + + + +
Name TypeDescription
class + DataFrame + +
+

pandas.DataFrame: A dataframe with a row for each resource within the catalog

+
+
+ +
+ Source code in py_src/fusion/fusion.py +
240
+241
+242
+243
+244
+245
+246
+247
+248
+249
+250
+251
+252
+253
+254
+255
+256
+257
+258
def catalog_resources(self, catalog: str | None = None, output: bool = False) -> pd.DataFrame:
+    """List the resources contained within the catalog, for example products and datasets.
+
+    Args:
+        catalog (str, optional): A catalog identifier. Defaults to 'common'.
+        output (bool, optional): If True then print the dataframe. Defaults to False.
+
+    Returns:
+       class:`pandas.DataFrame`: A dataframe with a row for each resource within the catalog
+    """
+    catalog = self._use_catalog(catalog)
+
+    url = f"{self.root_url}catalogs/{catalog}"
+    cat_df = Fusion._call_for_dataframe(url, self.session)
+
+    if output:
+        pass
+
+    return cat_df
+
+
+
+ +
+ +
+ + +

+ create_dataset_lineage(base_dataset, source_dataset_catalog_mapping, catalog=None, return_resp_obj=False) + +

+ + +
+ +

Upload lineage to a dataset.

+ + +

Parameters:

+ + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + +
NameTypeDescriptionDefault
base_dataset + str + +
+

A dataset identifier to which you want to add lineage.

+
+
+ required +
source_dataset_catalog_mapping + Union[DataFrame, list[dict[str]]] + +
+

Mapping for the dataset +identifier(s) and catalog(s) from which to add lineage.

+
+
+ required +
catalog + Optional[str] + +
+

Catalog identifier. Defaults to None.

+
+
+ None +
return_resp_obj + bool + +
+

If True then return the response object. Defaults to False.

+
+
+ False +
+ + +

Raises:

+ + + + + + + + + + + + + + + + + +
TypeDescription
+ ValueError + +
+

If source_dataset_catalog_mapping is not a pandas DataFrame or a list of dictionaries

+
+
+ HTTPError + +
+

If the request is unsuccessful.

+
+
+ + +

Examples:

+

Creating lineage from a pandas DataFrame.

+
>>> data = [{"dataset": "a", "catalog": "a"}, {"dataset": "b", "catalog": "b"}]
+>>> df = pd.DataFrame(data)
+>>> fusion = Fusion()
+>>> fusion.create_dataset_lineage(base_dataset="c", source_dataset_catalog_mapping=df, catalog="c")
+
+

Creating lineage from a list of dictionaries.

+
>>> data = [{"dataset": "a", "catalog": "a"}, {"dataset": "b", "catalog": "b"}]
+>>> fusion = Fusion()
+>>> fusion.create_dataset_lineage(base_dataset="c", source_dataset_catalog_mapping=data, catalog="c")
+
+ +
+ Source code in py_src/fusion/fusion.py +
1453
+1454
+1455
+1456
+1457
+1458
+1459
+1460
+1461
+1462
+1463
+1464
+1465
+1466
+1467
+1468
+1469
+1470
+1471
+1472
+1473
+1474
+1475
+1476
+1477
+1478
+1479
+1480
+1481
+1482
+1483
+1484
+1485
+1486
+1487
+1488
+1489
+1490
+1491
+1492
+1493
+1494
+1495
+1496
+1497
+1498
+1499
+1500
+1501
+1502
+1503
+1504
+1505
def create_dataset_lineage(
+    self,
+    base_dataset: str,
+    source_dataset_catalog_mapping: pd.DataFrame | list[dict[str, str]],
+    catalog: str | None = None,
+    return_resp_obj: bool = False,
+) -> requests.Response | None:
+    """Upload lineage to a dataset.
+
+    Args:
+        base_dataset (str): A dataset identifier to which you want to add lineage.
+        source_dataset_catalog_mapping (Union[pd.DataFrame, list[dict[str]]]): Mapping for the dataset
+            identifier(s) and catalog(s) from which to add lineage.
+        catalog (Optional[str], optional): Catalog identifier. Defaults to None.
+        return_resp_obj (bool, optional): If True then return the response object. Defaults to False.
+
+    Raises:
+        ValueError: If source_dataset_catalog_mapping is not a pandas DataFrame or a list of dictionaries
+        HTTPError: If the request is unsuccessful.
+
+    Examples:
+        Creating lineage from a pandas DataFrame.
+        >>> data = [{"dataset": "a", "catalog": "a"}, {"dataset": "b", "catalog": "b"}]
+        >>> df = pd.DataFrame(data)
+        >>> fusion = Fusion()
+        >>> fusion.create_dataset_lineage(base_dataset="c", source_dataset_catalog_mapping=df, catalog="c")
+
+        Creating lineage from a list of dictionaries.
+        >>> data = [{"dataset": "a", "catalog": "a"}, {"dataset": "b", "catalog": "b"}]
+        >>> fusion = Fusion()
+        >>> fusion.create_dataset_lineage(base_dataset="c", source_dataset_catalog_mapping=data, catalog="c")
+
+    """
+    catalog = self._use_catalog(catalog)
+
+    if isinstance(source_dataset_catalog_mapping, pd.DataFrame):
+        dataset_mapping_list = [
+            {"dataset": row["dataset"], "catalog": row["catalog"]}
+            for _, row in source_dataset_catalog_mapping.iterrows()
+        ]
+    elif isinstance(source_dataset_catalog_mapping, list):
+        dataset_mapping_list = source_dataset_catalog_mapping
+    else:
+        raise ValueError("source_dataset_catalog_mapping must be a pandas DataFrame or a list of dictionaries.")
+    data = {"source": dataset_mapping_list}
+
+    url = f"{self.root_url}catalogs/{catalog}/datasets/{base_dataset}/lineage"
+
+    resp = self.session.post(url, json=data)
+
+    resp.raise_for_status()
+
+    return resp if return_resp_obj else None
+
+
+
+ +
+ +
+ + +

+ dataset(identifier, title='', category=None, description='', frequency='Once', is_internal_only_dataset=False, is_third_party_data=True, is_restricted=None, is_raw_data=True, maintainer='J.P. Morgan Fusion', source=None, region=None, publisher='J.P. Morgan', product=None, sub_category=None, tags=None, created_date=None, modified_date=None, delivery_channel='API', language='English', status='Available', type_='Source', container_type='Snapshot-Full', snowflake=None, complexity=None, is_immutable=None, is_mnpi=None, is_pci=None, is_pii=None, is_client=None, is_public=None, is_internal=None, is_confidential=None, is_highly_confidential=None, is_active=None, owners=None, application_id=None, **kwargs) + +

+ + +
+ +

Instantiate a Dataset object with this client for metadata creation.

+ + +

Parameters:

+ + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + +
NameTypeDescriptionDefault
identifier + str + +
+

Dataset identifier.

+
+
+ required +
title + str + +
+

Dataset title. If not provided, defaults to identifier.

+
+
+ '' +
category + str | list[str] | None + +
+

A category or list of categories for the dataset.

+
+
+ None +
description + str + +
+

Dataset description. If not provided, defaults to identifier.

+
+
+ '' +
frequency + str + +
+

The frequency of the dataset. Defaults to "Once".

+
+
+ 'Once' +
is_internal_only_dataset + bool + +
+

Flag for internal datasets. Defaults to False.

+
+
+ False +
is_third_party_data + bool + +
+

Flag for third party data. Defaults to True.

+
+
+ True +
is_restricted + bool | None + +
+

Flag for restricted datasets. Defaults to None.

+
+
+ None +
is_raw_data + bool + +
+

Flag for raw datasets. Defaults to True.

+
+
+ True +
maintainer + str | None + +
+

Dataset maintainer. Defaults to "J.P. Morgan Fusion".

+
+
+ 'J.P. Morgan Fusion' +
source + str | list[str] | None + +
+

Name of data vendor which provided the data. Defaults to None.

+
+
+ None +
region + str | list[str] | None + +
+

Region. Defaults to None.

+
+
+ None +
publisher + str + +
+

Name of vendor that publishes the data. Defaults to "J.P. Morgan".

+
+
+ 'J.P. Morgan' +
product + str | list[str] | None + +
+

Product to associate dataset with. Defaults to None.

+
+
+ None +
sub_category + str | list[str] | None + +
+

Sub-category. Defaults to None.

+
+
+ None +
tags + str | list[str] | None + +
+

Tags used for search purposes. Defaults to None.

+
+
+ None +
created_date + str | None + +
+

Created date. Defaults to None.

+
+
+ None +
modified_date + str | None + +
+

Modified date. Defaults to None.

+
+
+ None +
delivery_channel + str | list[str] + +
+

Delivery channel. Defaults to "API".

+
+
+ 'API' +
language + str + +
+

Language. Defaults to "English".

+
+
+ 'English' +
status + str + +
+

Status. Defaults to "Available".

+
+
+ 'Available' +
type_ + str | None + +
+

Dataset type. Defaults to "Source".

+
+
+ 'Source' +
container_type + str | None + +
+

Container type. Defaults to "Snapshot-Full".

+
+
+ 'Snapshot-Full' +
snowflake + str | None + +
+

Snowflake account connection. Defaults to None.

+
+
+ None +
complexity + str | None + +
+

Complexity. Defaults to None.

+
+
+ None +
is_immutable + bool | None + +
+

Flag for immutable datasets. Defaults to None.

+
+
+ None +
is_mnpi + bool | None + +
+

is_mnpi. Defaults to None.

+
+
+ None +
is_pci + bool | None + +
+

is_pci. Defaults to None.

+
+
+ None +
is_pii + bool | None + +
+

is_pii. Defaults to None.

+
+
+ None +
is_client + bool | None + +
+

is_client. Defaults to None.

+
+
+ None +
is_public + bool | None + +
+

is_public. Defaults to None.

+
+
+ None +
is_internal + bool | None + +
+

is_internal. Defaults to None.

+
+
+ None +
is_confidential + bool | None + +
+

is_confidential. Defaults to None.

+
+
+ None +
is_highly_confidential + bool | None + +
+

is_highly_confidential. Defaults to None.

+
+
+ None +
is_active + bool | None + +
+

is_active. Defaults to None.

+
+
+ None +
owners + list[str] | None + +
+

The owners of the dataset. Defaults to None.

+
+
+ None +
application_id + str | None + +
+

The application ID of the dataset. Defaults to None.

+
+
+ None +
+ + +

Returns:

+ + + + + + + + + + + + + +
Name TypeDescription
Dataset + Dataset + +
+

Fusion Dataset class.

+
+
+ + +

Examples:

+
>>> from fusion import Fusion
+>>> fusion = Fusion()
+>>> dataset = fusion.dataset(identifier="DATASET_1")
+
+ + +
+ Note +

See the dataset module for more information on functionalities of dataset objects.

+
+
+ Source code in py_src/fusion/fusion.py +
1630
+1631
+1632
+1633
+1634
+1635
+1636
+1637
+1638
+1639
+1640
+1641
+1642
+1643
+1644
+1645
+1646
+1647
+1648
+1649
+1650
+1651
+1652
+1653
+1654
+1655
+1656
+1657
+1658
+1659
+1660
+1661
+1662
+1663
+1664
+1665
+1666
+1667
+1668
+1669
+1670
+1671
+1672
+1673
+1674
+1675
+1676
+1677
+1678
+1679
+1680
+1681
+1682
+1683
+1684
+1685
+1686
+1687
+1688
+1689
+1690
+1691
+1692
+1693
+1694
+1695
+1696
+1697
+1698
+1699
+1700
+1701
+1702
+1703
+1704
+1705
+1706
+1707
+1708
+1709
+1710
+1711
+1712
+1713
+1714
+1715
+1716
+1717
+1718
+1719
+1720
+1721
+1722
+1723
+1724
+1725
+1726
+1727
+1728
+1729
+1730
+1731
+1732
+1733
+1734
+1735
+1736
+1737
+1738
+1739
+1740
+1741
+1742
+1743
+1744
+1745
+1746
+1747
+1748
+1749
+1750
+1751
+1752
+1753
+1754
+1755
+1756
+1757
+1758
+1759
+1760
+1761
+1762
+1763
+1764
+1765
+1766
def dataset(  # noqa: PLR0913
+    self,
+    identifier: str,
+    title: str = "",
+    category: str | list[str] | None = None,
+    description: str = "",
+    frequency: str = "Once",
+    is_internal_only_dataset: bool = False,
+    is_third_party_data: bool = True,
+    is_restricted: bool | None = None,
+    is_raw_data: bool = True,
+    maintainer: str | None = "J.P. Morgan Fusion",
+    source: str | list[str] | None = None,
+    region: str | list[str] | None = None,
+    publisher: str = "J.P. Morgan",
+    product: str | list[str] | None = None,
+    sub_category: str | list[str] | None = None,
+    tags: str | list[str] | None = None,
+    created_date: str | None = None,
+    modified_date: str | None = None,
+    delivery_channel: str | list[str] = "API",
+    language: str = "English",
+    status: str = "Available",
+    type_: str | None = "Source",
+    container_type: str | None = "Snapshot-Full",
+    snowflake: str | None = None,
+    complexity: str | None = None,
+    is_immutable: bool | None = None,
+    is_mnpi: bool | None = None,
+    is_pci: bool | None = None,
+    is_pii: bool | None = None,
+    is_client: bool | None = None,
+    is_public: bool | None = None,
+    is_internal: bool | None = None,
+    is_confidential: bool | None = None,
+    is_highly_confidential: bool | None = None,
+    is_active: bool | None = None,
+    owners: list[str] | None = None,
+    application_id: str | dict[str, str] | None = None,
+    **kwargs: Any,
+) -> Dataset:
+    """Instantiate a Dataset object with this client for metadata creation.
+
+    Args:
+        identifier (str): Dataset identifier.
+        title (str, optional): Dataset title. If not provided, defaults to identifier.
+        category (str | list[str] | None, optional): A category or list of categories for the dataset.
+        Defaults to None.
+        description (str, optional): Dataset description. If not provided, defaults to identifier.
+        frequency (str, optional): The frequency of the dataset. Defaults to "Once".
+        is_internal_only_dataset (bool, optional): Flag for internal datasets. Defaults to False.
+        is_third_party_data (bool, optional): Flag for third party data. Defaults to True.
+        is_restricted (bool | None, optional): Flag for restricted datasets. Defaults to None.
+        is_raw_data (bool, optional): Flag for raw datasets. Defaults to True.
+        maintainer (str | None, optional): Dataset maintainer. Defaults to "J.P. Morgan Fusion".
+        source (str | list[str] | None, optional): Name of data vendor which provided the data. Defaults to None.
+        region (str | list[str] | None, optional): Region. Defaults to None.
+        publisher (str, optional): Name of vendor that publishes the data. Defaults to "J.P. Morgan".
+        product (str | list[str] | None, optional): Product to associate dataset with. Defaults to None.
+        sub_category (str | list[str] | None, optional): Sub-category. Defaults to None.
+        tags (str | list[str] | None, optional): Tags used for search purposes. Defaults to None.
+        created_date (str | None, optional): Created date. Defaults to None.
+        modified_date (str | None, optional): Modified date. Defaults to None.
+        delivery_channel (str | list[str], optional): Delivery channel. Defaults to "API".
+        language (str, optional): Language. Defaults to "English".
+        status (str, optional): Status. Defaults to "Available".
+        type_ (str | None, optional): Dataset type. Defaults to "Source".
+        container_type (str | None, optional): Container type. Defaults to "Snapshot-Full".
+        snowflake (str | None, optional): Snowflake account connection. Defaults to None.
+        complexity (str | None, optional): Complexity. Defaults to None.
+        is_immutable (bool | None, optional): Flag for immutable datasets. Defaults to None.
+        is_mnpi (bool | None, optional): is_mnpi. Defaults to None.
+        is_pci (bool | None, optional): is_pci. Defaults to None.
+        is_pii (bool | None, optional): is_pii. Defaults to None.
+        is_client (bool | None, optional): is_client. Defaults to None.
+        is_public (bool | None, optional): is_public. Defaults to None.
+        is_internal (bool | None, optional): is_internal. Defaults to None.
+        is_confidential (bool | None, optional): is_confidential. Defaults to None.
+        is_highly_confidential (bool | None, optional): is_highly_confidential. Defaults to None.
+        is_active (bool | None, optional): is_active. Defaults to None.
+        owners (list[str] | None, optional): The owners of the dataset. Defaults to None.
+        application_id (str | None, optional): The application ID of the dataset. Defaults to None.
+
+    Returns:
+        Dataset: Fusion Dataset class.
+
+    Examples:
+        >>> from fusion import Fusion
+        >>> fusion = Fusion()
+        >>> dataset = fusion.dataset(identifier="DATASET_1")
+
+    Note:
+        See the dataset module for more information on functionalities of dataset objects.
+
+    """
+    dataset_obj = Dataset(
+        identifier=identifier,
+        title=title,
+        category=category,
+        description=description,
+        frequency=frequency,
+        is_internal_only_dataset=is_internal_only_dataset,
+        is_third_party_data=is_third_party_data,
+        is_restricted=is_restricted,
+        is_raw_data=is_raw_data,
+        maintainer=maintainer,
+        source=source,
+        region=region,
+        publisher=publisher,
+        product=product,
+        sub_category=sub_category,
+        tags=tags,
+        created_date=created_date,
+        modified_date=modified_date,
+        delivery_channel=delivery_channel,
+        language=language,
+        status=status,
+        type_=type_,
+        container_type=container_type,
+        snowflake=snowflake,
+        complexity=complexity,
+        is_immutable=is_immutable,
+        is_mnpi=is_mnpi,
+        is_pci=is_pci,
+        is_pii=is_pii,
+        is_client=is_client,
+        is_public=is_public,
+        is_internal=is_internal,
+        is_confidential=is_confidential,
+        is_highly_confidential=is_highly_confidential,
+        is_active=is_active,
+        owners=owners,
+        application_id=application_id,
+        **kwargs,
+    )
+    dataset_obj.client = self
+    return dataset_obj
+
+
+
+ +
+ +
+ + +

+ dataset_resources(dataset, catalog=None, output=False) + +

+ + +
+ +

List the resources available for a dataset, currently this will always be a datasetseries.

+ + +

Parameters:

+ + + + + + + + + + + + + + + + + + + + + + + + + + + + + +
NameTypeDescriptionDefault
dataset + str + +
+

A dataset identifier

+
+
+ required +
catalog + str + +
+

A catalog identifier. Defaults to 'common'.

+
+
+ None +
output + bool + +
+

If True then print the dataframe. Defaults to False.

+
+
+ False +
+ + +

Returns:

+ + + + + + + + + + + + + +
Name TypeDescription
class + DataFrame + +
+

pandas.DataFrame: A dataframe with a row for each resource

+
+
+ +
+ Source code in py_src/fusion/fusion.py +
421
+422
+423
+424
+425
+426
+427
+428
+429
+430
+431
+432
+433
+434
+435
+436
+437
+438
+439
+440
def dataset_resources(self, dataset: str, catalog: str | None = None, output: bool = False) -> pd.DataFrame:
+    """List the resources available for a dataset, currently this will always be a datasetseries.
+
+    Args:
+        dataset (str): A dataset identifier
+        catalog (str, optional): A catalog identifier. Defaults to 'common'.
+        output (bool, optional): If True then print the dataframe. Defaults to False.
+
+    Returns:
+        class:`pandas.DataFrame`: A dataframe with a row for each resource
+    """
+    catalog = self._use_catalog(catalog)
+
+    url = f"{self.root_url}catalogs/{catalog}/datasets/{dataset}"
+    ds_res_df = Fusion._call_for_dataframe(url, self.session)
+
+    if output:
+        pass
+
+    return ds_res_df
+
+
+
+ +
+ +
+ + +

+ datasetmember_resources(dataset, series, catalog=None, output=False) + +

+ + +
+ +

List the available resources for a datasetseries member.

+ + +

Parameters:

+ + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + +
NameTypeDescriptionDefault
dataset + str + +
+

A dataset identifier

+
+
+ required +
series + str + +
+

The datasetseries identifier

+
+
+ required +
catalog + str + +
+

A catalog identifier. Defaults to 'common'.

+
+
+ None +
output + bool + +
+

If True then print the dataframe. Defaults to False.

+
+
+ False +
+ + +

Returns:

+ + + + + + + + + + + + + +
Name TypeDescription
class + DataFrame + +
+

pandas.DataFrame: A dataframe with a row for each datasetseries member resource. +Currently, this will always be distributions.

+
+
+ +
+ Source code in py_src/fusion/fusion.py +
520
+521
+522
+523
+524
+525
+526
+527
+528
+529
+530
+531
+532
+533
+534
+535
+536
+537
+538
+539
+540
+541
+542
+543
+544
+545
+546
+547
def datasetmember_resources(
+    self,
+    dataset: str,
+    series: str,
+    catalog: str | None = None,
+    output: bool = False,
+) -> pd.DataFrame:
+    """List the available resources for a datasetseries member.
+
+    Args:
+        dataset (str): A dataset identifier
+        series (str): The datasetseries identifier
+        catalog (str, optional): A catalog identifier. Defaults to 'common'.
+        output (bool, optional): If True then print the dataframe. Defaults to False.
+
+    Returns:
+        class:`pandas.DataFrame`: A dataframe with a row for each datasetseries member resource.
+            Currently, this will always be distributions.
+    """
+    catalog = self._use_catalog(catalog)
+
+    url = f"{self.root_url}catalogs/{catalog}/datasets/{dataset}/datasetseries/{series}"
+    ds_mem_res_df = Fusion._call_for_dataframe(url, self.session)
+
+    if output:
+        pass
+
+    return ds_mem_res_df
+
+
+
+ +
+ +
+ + +

+ delete_all_datasetmembers(dataset, catalog=None, return_resp_obj=False) + +

+ + +
+ +

Delete all dataset members within a dataset.

+ + +

Parameters:

+ + + + + + + + + + + + + + + + + + + + + + + + + + + + + +
NameTypeDescriptionDefault
dataset + str + +
+

A dataset identifier

+
+
+ required +
catalog + str | None + +
+

A catalog identifier. Defaults to 'common'.

+
+
+ None +
return_resp_obj + bool + +
+

If True then return the response object. Defaults to False.

+
+
+ False +
+ + +

Returns:

+ + + + + + + + + + + + + +
TypeDescription
+ Response | None + +
+

list[requests.Response]: a list of response objects.

+
+
+ + +

Examples:

+
>>> from fusion import Fusion
+>>> fusion = Fusion()
+>>> fusion.delete_all_datasetmembers(dataset="dataset1")
+
+ +
+ Source code in py_src/fusion/fusion.py +
1926
+1927
+1928
+1929
+1930
+1931
+1932
+1933
+1934
+1935
+1936
+1937
+1938
+1939
+1940
+1941
+1942
+1943
+1944
+1945
+1946
+1947
+1948
+1949
+1950
+1951
+1952
def delete_all_datasetmembers(
+    self,
+    dataset: str,
+    catalog: str | None = None,
+    return_resp_obj: bool = False,
+) -> requests.Response | None:
+    """Delete all dataset members within a dataset.
+
+    Args:
+        dataset (str): A dataset identifier
+        catalog (str | None, optional): A catalog identifier. Defaults to 'common'.
+        return_resp_obj (bool, optional): If True then return the response object. Defaults to False.
+
+    Returns:
+        list[requests.Response]: a list of response objects.
+
+    Examples:
+        >>> from fusion import Fusion
+        >>> fusion = Fusion()
+        >>> fusion.delete_all_datasetmembers(dataset="dataset1")
+
+    """
+    catalog = self._use_catalog(catalog)
+    url = f"{self.root_url}catalogs/{catalog}/datasets/{dataset}/datasetseries"
+    resp = self.session.delete(url)
+    requests_raise_for_status(resp)
+    return resp if return_resp_obj else None
+
+
+
+ +
+ +
+ + +

+ delete_datasetmembers(dataset, series_members, catalog=None, return_resp_obj=False) + +

+ + +
+ +

Delete dataset members.

+ + +

Parameters:

+ + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + +
NameTypeDescriptionDefault
dataset + str + +
+

A dataset identifier

+
+
+ required +
series_members + str | list[str] + +
+

A string or list of strings that are dataset series member

+
+
+ required +
catalog + str | None + +
+

A catalog identifier. Defaults to 'common'.

+
+
+ None +
return_resp_obj + bool + +
+

If True then return the response object. Defaults to False.

+
+
+ False +
+ + +

Returns:

+ + + + + + + + + + + + + +
TypeDescription
+ list[Response] | None + +
+

list[requests.Response]: a list of response objects.

+
+
+ + +

Examples:

+

Delete one dataset member.

+
>>> from fusion import Fusion
+>>> fusion = Fusion()
+>>> fusion.delete_datasetmembers(dataset="dataset1", series_members="series1")
+
+

Delete multiple dataset members.

+
>>> from fusion import Fusion
+>>> fusion = Fusion()
+>>> fusion.delete_datasetmembers(dataset="dataset1", series_members=["series1", "series2"])
+
+ +
+ Source code in py_src/fusion/fusion.py +
1882
+1883
+1884
+1885
+1886
+1887
+1888
+1889
+1890
+1891
+1892
+1893
+1894
+1895
+1896
+1897
+1898
+1899
+1900
+1901
+1902
+1903
+1904
+1905
+1906
+1907
+1908
+1909
+1910
+1911
+1912
+1913
+1914
+1915
+1916
+1917
+1918
+1919
+1920
+1921
+1922
+1923
+1924
def delete_datasetmembers(
+    self,
+    dataset: str,
+    series_members: str | list[str],
+    catalog: str | None = None,
+    return_resp_obj: bool = False,
+) -> list[requests.Response] | None:
+    """Delete dataset members.
+
+    Args:
+        dataset (str): A dataset identifier
+        series_members (str | list[str]): A string or list of strings that are dataset series member
+        identifiers to delete.
+        catalog (str | None, optional): A catalog identifier. Defaults to 'common'.
+        return_resp_obj (bool, optional): If True then return the response object. Defaults to False.
+
+    Returns:
+        list[requests.Response]: a list of response objects.
+
+    Examples:
+        Delete one dataset member.
+
+        >>> from fusion import Fusion
+        >>> fusion = Fusion()
+        >>> fusion.delete_datasetmembers(dataset="dataset1", series_members="series1")
+
+        Delete multiple dataset members.
+
+        >>> from fusion import Fusion
+        >>> fusion = Fusion()
+        >>> fusion.delete_datasetmembers(dataset="dataset1", series_members=["series1", "series2"])
+
+    """
+    catalog = self._use_catalog(catalog)
+    if isinstance(series_members, str):
+        series_members = [series_members]
+    responses = []
+    for series_member in series_members:
+        url = f"{self.root_url}catalogs/{catalog}/datasets/{dataset}/datasetseries/{series_member}"
+        resp = self.session.delete(url)
+        requests_raise_for_status(resp)
+        responses.append(resp)
+    return responses if return_resp_obj else None
+
+
+
+ +
+ +
+ + +

+ download(dataset, dt_str='latest', dataset_format='parquet', catalog=None, n_par=None, show_progress=True, force_download=False, download_folder=None, return_paths=False, partitioning=None, preserve_original_name=False) + +

+ + +
+ +

Downloads the requested distributions of a dataset to disk.

+ + +

Parameters:

+ + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + +
NameTypeDescriptionDefault
dataset + str + +
+

A dataset identifier

+
+
+ required +
dt_str + str + +
+

Either a single date or a range identified by a start or end date, +or both separated with a ":". Defaults to 'latest' which will return the most recent +instance of the dataset. If more than one series member exists on the latest date, the +series member identifiers will be sorted alphabetically and the last one will be downloaded.

+
+
+ 'latest' +
dataset_format + str + +
+

The file format, e.g. CSV or Parquet. Defaults to 'parquet'.

+
+
+ 'parquet' +
catalog + str + +
+

A catalog identifier. Defaults to 'common'.

+
+
+ None +
n_par + int + +
+

Specify how many distributions to download in parallel. +Defaults to all cpus available.

+
+
+ None +
show_progress + bool + +
+

Display a progress bar during data download Defaults to True.

+
+
+ True +
force_download + bool + +
+

If True then will always download a file even +if it is already on disk. Defaults to True.

+
+
+ False +
download_folder + str + +
+

The path, absolute or relative, where downloaded files are saved. +Defaults to download_folder as set in init

+
+
+ None +
return_paths + bool + +
+

Return paths and success statuses of the downloaded files.

+
+
+ False +
partitioning + str + +
+

Partitioning specification.

+
+
+ None +
preserve_original_name + bool + +
+

Preserve the original name of the file. Defaults to False.

+
+
+ False +
+ + +
+ Source code in py_src/fusion/fusion.py +
650
+651
+652
+653
+654
+655
+656
+657
+658
+659
+660
+661
+662
+663
+664
+665
+666
+667
+668
+669
+670
+671
+672
+673
+674
+675
+676
+677
+678
+679
+680
+681
+682
+683
+684
+685
+686
+687
+688
+689
+690
+691
+692
+693
+694
+695
+696
+697
+698
+699
+700
+701
+702
+703
+704
+705
+706
+707
+708
+709
+710
+711
+712
+713
+714
+715
+716
+717
+718
+719
+720
+721
+722
+723
+724
+725
+726
+727
+728
+729
+730
+731
+732
+733
+734
+735
+736
+737
+738
+739
+740
+741
+742
+743
+744
+745
+746
+747
+748
+749
+750
+751
+752
+753
+754
+755
+756
+757
+758
+759
+760
+761
+762
+763
def download(  # noqa: PLR0912, PLR0913
+    self,
+    dataset: str,
+    dt_str: str = "latest",
+    dataset_format: str = "parquet",
+    catalog: str | None = None,
+    n_par: int | None = None,
+    show_progress: bool = True,
+    force_download: bool = False,
+    download_folder: str | None = None,
+    return_paths: bool = False,
+    partitioning: str | None = None,
+    preserve_original_name: bool = False,
+) -> list[tuple[bool, str, str | None]] | None:
+    """Downloads the requested distributions of a dataset to disk.
+
+    Args:
+        dataset (str): A dataset identifier
+        dt_str (str, optional): Either a single date or a range identified by a start or end date,
+            or both separated with a ":". Defaults to 'latest' which will return the most recent
+            instance of the dataset. If more than one series member exists on the latest date, the
+            series member identifiers will be sorted alphabetically and the last one will be downloaded.
+        dataset_format (str, optional): The file format, e.g. CSV or Parquet. Defaults to 'parquet'.
+        catalog (str, optional): A catalog identifier. Defaults to 'common'.
+        n_par (int, optional): Specify how many distributions to download in parallel.
+            Defaults to all cpus available.
+        show_progress (bool, optional): Display a progress bar during data download Defaults to True.
+        force_download (bool, optional): If True then will always download a file even
+            if it is already on disk. Defaults to True.
+        download_folder (str, optional): The path, absolute or relative, where downloaded files are saved.
+            Defaults to download_folder as set in __init__
+        return_paths (bool, optional): Return paths and success statuses of the downloaded files.
+        partitioning (str, optional): Partitioning specification.
+        preserve_original_name (bool, optional): Preserve the original name of the file. Defaults to False.
+
+    Returns:
+
+    """
+    catalog = self._use_catalog(catalog)
+
+    valid_date_range = re.compile(r"^(\d{4}\d{2}\d{2})$|^((\d{4}\d{2}\d{2})?([:])(\d{4}\d{2}\d{2})?)$")
+
+    if valid_date_range.match(dt_str) or dt_str == "latest":
+        required_series = self._resolve_distro_tuples(dataset, dt_str, dataset_format, catalog)
+    else:
+        # sample data is limited to csv
+        if dt_str == "sample":
+            dataset_format = self.list_distributions(dataset, dt_str, catalog)["identifier"].iloc[0]
+        required_series = [(catalog, dataset, dt_str, dataset_format)]
+
+    if dataset_format not in RECOGNIZED_FORMATS + ["raw"]:
+        raise ValueError(f"Dataset format {dataset_format} is not supported")
+
+    if not download_folder:
+        download_folder = self.download_folder
+
+    download_folders = [download_folder] * len(required_series)
+
+    if partitioning == "hive":
+        members = [series[2].strip("/") for series in required_series]
+        download_folders = [
+            f"{download_folders[i]}/{series[0]}/{series[1]}/{members[i]}"
+            for i, series in enumerate(required_series)
+        ]
+
+    for d in download_folders:
+        if not self.fs.exists(d):
+            self.fs.mkdir(d, create_parents=True)
+
+    n_par = cpu_count(n_par)
+    download_spec = [
+        {
+            "lfs": self.fs,
+            "rpath": distribution_to_url(
+                self.root_url,
+                series[1],
+                series[2],
+                series[3],
+                series[0],
+                is_download=True,
+            ),
+            "lpath": distribution_to_filename(
+                download_folders[i],
+                series[1],
+                series[2],
+                series[3],
+                series[0],
+                partitioning=partitioning,
+            ),
+            "overwrite": force_download,
+            "preserve_original_name": preserve_original_name,
+        }
+        for i, series in enumerate(required_series)
+    ]
+
+    logger.log(
+        VERBOSE_LVL,
+        f"Beginning {len(download_spec)} downloads in batches of {n_par}",
+    )
+    if show_progress:
+        with joblib_progress("Downloading", total=len(download_spec)):
+            res = Parallel(n_jobs=n_par)(
+                delayed(self.get_fusion_filesystem().download)(**spec) for spec in download_spec
+            )
+    else:
+        res = Parallel(n_jobs=n_par)(
+            delayed(self.get_fusion_filesystem().download)(**spec) for spec in download_spec
+        )
+
+    if (len(res) > 0) and (not all(r[0] for r in res)):
+        for r in res:
+            if not r[0]:
+                warnings.warn(f"The download of {r[1]} was not successful", stacklevel=2)
+    return res if return_paths else None
+
+
+
+ +
+ +
+ + +

+ from_bytes(data, dataset, series_member='latest', catalog=None, distribution='parquet', show_progress=True, return_paths=False, chunk_size=5 * 2 ** 20, from_date=None, to_date=None, file_name=None, **kwargs) + +

+ + +
+ +

Uploads data from an object in memory.

+ + +

Parameters:

+ + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + +
NameTypeDescriptionDefault
data + str + +
+

an object in memory to upload

+
+
+ required +
dataset + str + +
+

Dataset name to which the bytes will be uploaded.

+
+
+ required +
series_member + str + +
+

A single date or label. Defaults to 'latest' which will return +the most recent.

+
+
+ 'latest' +
catalog + str + +
+

A catalog identifier. Defaults to 'common'.

+
+
+ None +
distribution + str + +
+

A distribution type, e.g. a file format or raw

+
+
+ 'parquet' +
show_progress + bool + +
+

Display a progress bar during data download Defaults to True.

+
+
+ True +
return_paths + bool + +
+

Return paths and success statuses of the downloaded files.

+
+
+ False +
chunk_size + int + +
+

Maximum chunk size.

+
+
+ 5 * 2 ** 20 +
from_date + str + +
+

start of the data date range contained in the distribution, +defaults to upload date

+
+
+ None +
to_date + str + +
+

end of the data date range contained in the distribution, defaults to upload date.

+
+
+ None +
file_name + str + +
+

file name to be used for the uploaded file. Defaults to Fusion standard naming.

+
+
+ None +
+ + +

Returns:

+ + + + + + + + + + + + + +
TypeDescription
+ list[tuple[bool, str, str | None]] | None + +
+

Optional[list[tuple[bool, str, Optional[str]]]: a list of tuples, one for each distribution

+
+
+ +
+ Source code in py_src/fusion/fusion.py +
1168
+1169
+1170
+1171
+1172
+1173
+1174
+1175
+1176
+1177
+1178
+1179
+1180
+1181
+1182
+1183
+1184
+1185
+1186
+1187
+1188
+1189
+1190
+1191
+1192
+1193
+1194
+1195
+1196
+1197
+1198
+1199
+1200
+1201
+1202
+1203
+1204
+1205
+1206
+1207
+1208
+1209
+1210
+1211
+1212
+1213
+1214
+1215
+1216
+1217
+1218
+1219
+1220
+1221
+1222
+1223
+1224
+1225
+1226
+1227
+1228
+1229
+1230
+1231
+1232
+1233
+1234
+1235
def from_bytes(  # noqa: PLR0913
+    self,
+    data: BytesIO,
+    dataset: str,
+    series_member: str = "latest",
+    catalog: str | None = None,
+    distribution: str = "parquet",
+    show_progress: bool = True,
+    return_paths: bool = False,
+    chunk_size: int = 5 * 2**20,
+    from_date: str | None = None,
+    to_date: str | None = None,
+    file_name: str | None = None,
+    **kwargs: Any,  # noqa: ARG002
+) -> list[tuple[bool, str, str | None]] | None:
+    """Uploads data from an object in memory.
+
+    Args:
+        data (str): an object in memory to upload
+        dataset (str): Dataset name to which the bytes will be uploaded.
+        series_member (str, optional): A single date or label. Defaults to 'latest' which will return
+            the most recent.
+        catalog (str, optional): A catalog identifier. Defaults to 'common'.
+        distribution (str, optional): A distribution type, e.g. a file format or raw
+        show_progress (bool, optional): Display a progress bar during data download Defaults to True.
+        return_paths (bool, optional): Return paths and success statuses of the downloaded files.
+        chunk_size (int, optional): Maximum chunk size.
+        from_date (str, optional): start of the data date range contained in the distribution,
+            defaults to upload date
+        to_date (str, optional): end of the data date range contained in the distribution, defaults to upload date.
+        file_name (str, optional): file name to be used for the uploaded file. Defaults to Fusion standard naming.
+
+    Returns:
+        Optional[list[tuple[bool, str, Optional[str]]]: a list of tuples, one for each distribution
+
+    """
+    catalog = self._use_catalog(catalog)
+
+    fs_fusion = self.get_fusion_filesystem()
+    if distribution not in RECOGNIZED_FORMATS + ["raw"]:
+        raise ValueError(f"Dataset format {distribution} is not supported")
+
+    is_raw = js.loads(fs_fusion.cat(f"{catalog}/datasets/{dataset}"))["isRawData"]
+    local_url_eqiv = path_to_url(f"{dataset}__{catalog}__{series_member}.{distribution}", is_raw)
+
+    data_map_df = pd.DataFrame(["", local_url_eqiv, file_name]).T
+    data_map_df.columns = ["path", "url", "file_name"]  # type: ignore
+
+    res = upload_files(
+        fs_fusion,
+        data,
+        data_map_df,
+        parallel=False,
+        n_par=1,
+        multipart=False,
+        chunk_size=chunk_size,
+        show_progress=show_progress,
+        from_date=from_date,
+        to_date=to_date,
+    )
+
+    if not all(r[0] for r in res):
+        failed_res = [r for r in res if not r[0]]
+        msg = f"Not all uploads were successfully completed. The following failed:\n{failed_res}"
+        logger.warning(msg)
+        warnings.warn(msg, stacklevel=2)
+
+    return res if return_paths else None
+
+
+
+ +
+ +
+ + +

+ get_events(last_event_id=None, catalog=None, in_background=True, url='https://fusion.jpmorgan.com/api/v1/') + +

+ + +
+ +

Run server sent event listener and print out the new events. Keyboard terminate to stop.

+ + +

Parameters:

+ + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + +
NameTypeDescriptionDefault
last_event_id + str + +
+

id of the last event.

+
+
+ None +
catalog + str + +
+

catalog.

+
+
+ None +
in_background + bool + +
+

execute event monitoring in the background (default = True).

+
+
+ True +
url + str + +
+

subscription url.

+
+
+ 'https://fusion.jpmorgan.com/api/v1/' +
+ + +
+ Source code in py_src/fusion/fusion.py +
1309
+1310
+1311
+1312
+1313
+1314
+1315
+1316
+1317
+1318
+1319
+1320
+1321
+1322
+1323
+1324
+1325
+1326
+1327
+1328
+1329
+1330
+1331
+1332
+1333
+1334
+1335
+1336
+1337
+1338
+1339
+1340
+1341
+1342
+1343
+1344
+1345
+1346
+1347
+1348
+1349
+1350
+1351
+1352
+1353
+1354
+1355
+1356
def get_events(
+    self,
+    last_event_id: str | None = None,
+    catalog: str | None = None,
+    in_background: bool = True,
+    url: str = "https://fusion.jpmorgan.com/api/v1/",
+) -> None | pd.DataFrame:
+    """Run server sent event listener and print out the new events. Keyboard terminate to stop.
+
+    Args:
+        last_event_id (str): id of the last event.
+        catalog (str): catalog.
+        in_background (bool): execute event monitoring in the background (default = True).
+        url (str): subscription url.
+    Returns:
+        Union[None, class:`pandas.DataFrame`]: If in_background is True then the function returns no output.
+            If in_background is set to False then pandas DataFrame is output upon keyboard termination.
+    """
+
+    catalog = self._use_catalog(catalog)
+    if not in_background:
+        from sseclient import SSEClient
+
+        _ = self.list_catalogs()  # refresh token
+        interrupted = False
+        messages = SSEClient(
+            session=self.session,
+            url=f"{url}catalogs/{catalog}/notifications/subscribe",
+            last_id=last_event_id,
+            headers={
+                "authorization": f"bearer {self.credentials.bearer_token}",
+            },
+        )
+        lst = []
+        try:
+            for msg in messages:
+                event = js.loads(msg.data)
+                if event["type"] != "HeartBeatNotification":
+                    lst.append(event)
+        except KeyboardInterrupt:
+            interrupted = True
+        except Exception as e:
+            raise e
+        finally:
+            result = pd.DataFrame(lst) if interrupted or lst else None
+        return result
+    else:
+        return self.events
+
+
+
+ +
+ +
+ + +

+ get_fusion_filesystem() + +

+ + +
+ +

Creates Fusion Filesystem.

+

Returns: Fusion Filesystem

+ +
+ Source code in py_src/fusion/fusion.py +
215
+216
+217
+218
+219
+220
+221
def get_fusion_filesystem(self) -> FusionHTTPFileSystem:
+    """Creates Fusion Filesystem.
+
+    Returns: Fusion Filesystem
+
+    """
+    return FusionHTTPFileSystem(client_kwargs={"root_url": self.root_url, "credentials": self.credentials})
+
+
+
+ +
+ +
+ + +

+ input_dataflow(identifier, title='', category=None, description='', frequency='Once', is_internal_only_dataset=False, is_third_party_data=True, is_restricted=None, is_raw_data=True, maintainer='J.P. Morgan Fusion', source=None, region=None, publisher='J.P. Morgan', product=None, sub_category=None, tags=None, created_date=None, modified_date=None, delivery_channel='API', language='English', status='Available', type_='Flow', container_type='Snapshot-Full', snowflake=None, complexity=None, is_immutable=None, is_mnpi=None, is_pci=None, is_pii=None, is_client=None, is_public=None, is_internal=None, is_confidential=None, is_highly_confidential=None, is_active=None, owners=None, application_id=None, producer_application_id=None, consumer_application_id=None, flow_details=None, **kwargs) + +

+ + +
+ +

Instantiate an Input Dataflow object with this client for metadata creation.

+ + +

Parameters:

+ + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + +
NameTypeDescriptionDefault
identifier + str + +
+

Dataset identifier.

+
+
+ required +
title + str + +
+

Dataset title. If not provided, defaults to identifier.

+
+
+ '' +
category + str | list[str] | None + +
+

A category or list of categories for the dataset.

+
+
+ None +
description + str + +
+

Dataset description. If not provided, defaults to identifier.

+
+
+ '' +
frequency + str + +
+

The frequency of the dataset. Defaults to "Once".

+
+
+ 'Once' +
is_internal_only_dataset + bool + +
+

Flag for internal datasets. Defaults to False.

+
+
+ False +
is_third_party_data + bool + +
+

Flag for third party data. Defaults to True.

+
+
+ True +
is_restricted + bool | None + +
+

Flag for restricted datasets. Defaults to None.

+
+
+ None +
is_raw_data + bool + +
+

Flag for raw datasets. Defaults to True.

+
+
+ True +
maintainer + str | None + +
+

Dataset maintainer. Defaults to "J.P. Morgan Fusion".

+
+
+ 'J.P. Morgan Fusion' +
source + str | list[str] | None + +
+

Name of data vendor which provided the data. Defaults to None.

+
+
+ None +
region + str | list[str] | None + +
+

Region. Defaults to None.

+
+
+ None +
publisher + str + +
+

Name of vendor that publishes the data. Defaults to "J.P. Morgan".

+
+
+ 'J.P. Morgan' +
product + str | list[str] | None + +
+

Product to associate dataset with. Defaults to None.

+
+
+ None +
sub_category + str | list[str] | None + +
+

Sub-category. Defaults to None.

+
+
+ None +
tags + str | list[str] | None + +
+

Tags used for search purposes. Defaults to None.

+
+
+ None +
created_date + str | None + +
+

Created date. Defaults to None.

+
+
+ None +
modified_date + str | None + +
+

Modified date. Defaults to None.

+
+
+ None +
delivery_channel + str | list[str] + +
+

Delivery channel. Defaults to "API".

+
+
+ 'API' +
language + str + +
+

Language. Defaults to "English".

+
+
+ 'English' +
status + str + +
+

Status. Defaults to "Available".

+
+
+ 'Available' +
type_ + str | None + +
+

Dataset type. Defaults to "Flow".

+
+
+ 'Flow' +
container_type + str | None + +
+

Container type. Defaults to "Snapshot-Full".

+
+
+ 'Snapshot-Full' +
snowflake + str | None + +
+

Snowflake account connection. Defaults to None.

+
+
+ None +
complexity + str | None + +
+

Complexity. Defaults to None.

+
+
+ None +
is_immutable + bool | None + +
+

Flag for immutable datasets. Defaults to None.

+
+
+ None +
is_mnpi + bool | None + +
+

is_mnpi. Defaults to None.

+
+
+ None +
is_pci + bool | None + +
+

is_pci. Defaults to None.

+
+
+ None +
is_pii + bool | None + +
+

is_pii. Defaults to None.

+
+
+ None +
is_client + bool | None + +
+

is_client. Defaults to None.

+
+
+ None +
is_public + bool | None + +
+

is_public. Defaults to None.

+
+
+ None +
is_internal + bool | None + +
+

is_internal. Defaults to None.

+
+
+ None +
is_confidential + bool | None + +
+

is_confidential. Defaults to None.

+
+
+ None +
is_highly_confidential + bool | None + +
+

is_highly_confidential. Defaults to None.

+
+
+ None +
is_active + bool | None + +
+

is_active. Defaults to None.

+
+
+ None +
owners + list[str] | None + +
+

The owners of the dataset. Defaults to None.

+
+
+ None +
application_id + str | None + +
+

The application ID of the dataset. Defaults to None.

+
+
+ None +
producer_application_id + dict[str, str] | None + +
+

The producer application ID (upstream application +producing the flow).

+
+
+ None +
consumer_application_id + list[dict[str, str]] | dict[str, str] | None + +
+

The consumer application +ID (downstream application, consuming the flow).

+
+
+ None +
flow_details + dict[str, str] | None + +
+

The flow details. Specifies input versus output flow. +Defaults to {"flowDirection": "Input"}.

+
+
+ None +
+ + +

Returns:

+ + + + + + + + + + + + + +
Name TypeDescription
Dataset + InputDataFlow + +
+

Fusion InputDataFlow class.

+
+
+ + +

Examples:

+
>>> from fusion import Fusion
+>>> fusion = Fusion()
+>>> dataset = fusion.input_dataflow(identifier="MY_DATAFLOW")
+
+ + +
+ Note +

See the dataset module for more information on functionalities of input dataflow objects.

+
+
+ Source code in py_src/fusion/fusion.py +
2137
+2138
+2139
+2140
+2141
+2142
+2143
+2144
+2145
+2146
+2147
+2148
+2149
+2150
+2151
+2152
+2153
+2154
+2155
+2156
+2157
+2158
+2159
+2160
+2161
+2162
+2163
+2164
+2165
+2166
+2167
+2168
+2169
+2170
+2171
+2172
+2173
+2174
+2175
+2176
+2177
+2178
+2179
+2180
+2181
+2182
+2183
+2184
+2185
+2186
+2187
+2188
+2189
+2190
+2191
+2192
+2193
+2194
+2195
+2196
+2197
+2198
+2199
+2200
+2201
+2202
+2203
+2204
+2205
+2206
+2207
+2208
+2209
+2210
+2211
+2212
+2213
+2214
+2215
+2216
+2217
+2218
+2219
+2220
+2221
+2222
+2223
+2224
+2225
+2226
+2227
+2228
+2229
+2230
+2231
+2232
+2233
+2234
+2235
+2236
+2237
+2238
+2239
+2240
+2241
+2242
+2243
+2244
+2245
+2246
+2247
+2248
+2249
+2250
+2251
+2252
+2253
+2254
+2255
+2256
+2257
+2258
+2259
+2260
+2261
+2262
+2263
+2264
+2265
+2266
+2267
+2268
+2269
+2270
+2271
+2272
+2273
+2274
+2275
+2276
+2277
+2278
+2279
+2280
+2281
+2282
+2283
+2284
+2285
+2286
def input_dataflow(  # noqa: PLR0913
+    self,
+    identifier: str,
+    title: str = "",
+    category: str | list[str] | None = None,
+    description: str = "",
+    frequency: str = "Once",
+    is_internal_only_dataset: bool = False,
+    is_third_party_data: bool = True,
+    is_restricted: bool | None = None,
+    is_raw_data: bool = True,
+    maintainer: str | None = "J.P. Morgan Fusion",
+    source: str | list[str] | None = None,
+    region: str | list[str] | None = None,
+    publisher: str = "J.P. Morgan",
+    product: str | list[str] | None = None,
+    sub_category: str | list[str] | None = None,
+    tags: str | list[str] | None = None,
+    created_date: str | None = None,
+    modified_date: str | None = None,
+    delivery_channel: str | list[str] = "API",
+    language: str = "English",
+    status: str = "Available",
+    type_: str | None = "Flow",
+    container_type: str | None = "Snapshot-Full",
+    snowflake: str | None = None,
+    complexity: str | None = None,
+    is_immutable: bool | None = None,
+    is_mnpi: bool | None = None,
+    is_pci: bool | None = None,
+    is_pii: bool | None = None,
+    is_client: bool | None = None,
+    is_public: bool | None = None,
+    is_internal: bool | None = None,
+    is_confidential: bool | None = None,
+    is_highly_confidential: bool | None = None,
+    is_active: bool | None = None,
+    owners: list[str] | None = None,
+    application_id: str | dict[str, str] | None = None,
+    producer_application_id: dict[str, str] | None = None,
+    consumer_application_id: list[dict[str, str]] | dict[str, str] | None = None,
+    flow_details: dict[str, str] | None = None,
+    **kwargs: Any,
+) -> InputDataFlow:
+    """Instantiate an Input Dataflow object with this client for metadata creation.
+
+    Args:
+        identifier (str): Dataset identifier.
+        title (str, optional): Dataset title. If not provided, defaults to identifier.
+        category (str | list[str] | None, optional): A category or list of categories for the dataset.
+        Defaults to None.
+        description (str, optional): Dataset description. If not provided, defaults to identifier.
+        frequency (str, optional): The frequency of the dataset. Defaults to "Once".
+        is_internal_only_dataset (bool, optional): Flag for internal datasets. Defaults to False.
+        is_third_party_data (bool, optional): Flag for third party data. Defaults to True.
+        is_restricted (bool | None, optional): Flag for restricted datasets. Defaults to None.
+        is_raw_data (bool, optional): Flag for raw datasets. Defaults to True.
+        maintainer (str | None, optional): Dataset maintainer. Defaults to "J.P. Morgan Fusion".
+        source (str | list[str] | None, optional): Name of data vendor which provided the data. Defaults to None.
+        region (str | list[str] | None, optional): Region. Defaults to None.
+        publisher (str, optional): Name of vendor that publishes the data. Defaults to "J.P. Morgan".
+        product (str | list[str] | None, optional): Product to associate dataset with. Defaults to None.
+        sub_category (str | list[str] | None, optional): Sub-category. Defaults to None.
+        tags (str | list[str] | None, optional): Tags used for search purposes. Defaults to None.
+        created_date (str | None, optional): Created date. Defaults to None.
+        modified_date (str | None, optional): Modified date. Defaults to None.
+        delivery_channel (str | list[str], optional): Delivery channel. Defaults to "API".
+        language (str, optional): Language. Defaults to "English".
+        status (str, optional): Status. Defaults to "Available".
+        type_ (str | None, optional): Dataset type. Defaults to "Flow".
+        container_type (str | None, optional): Container type. Defaults to "Snapshot-Full".
+        snowflake (str | None, optional): Snowflake account connection. Defaults to None.
+        complexity (str | None, optional): Complexity. Defaults to None.
+        is_immutable (bool | None, optional): Flag for immutable datasets. Defaults to None.
+        is_mnpi (bool | None, optional): is_mnpi. Defaults to None.
+        is_pci (bool | None, optional): is_pci. Defaults to None.
+        is_pii (bool | None, optional): is_pii. Defaults to None.
+        is_client (bool | None, optional): is_client. Defaults to None.
+        is_public (bool | None, optional): is_public. Defaults to None.
+        is_internal (bool | None, optional): is_internal. Defaults to None.
+        is_confidential (bool | None, optional): is_confidential. Defaults to None.
+        is_highly_confidential (bool | None, optional): is_highly_confidential. Defaults to None.
+        is_active (bool | None, optional): is_active. Defaults to None.
+        owners (list[str] | None, optional): The owners of the dataset. Defaults to None.
+        application_id (str | None, optional): The application ID of the dataset. Defaults to None.
+        producer_application_id (dict[str, str] | None, optional): The producer application ID (upstream application
+            producing the flow).
+        consumer_application_id (list[dict[str, str]] | dict[str, str] | None, optional): The consumer application 
+            ID (downstream application, consuming the flow).
+        flow_details (dict[str, str] | None, optional): The flow details. Specifies input versus output flow.
+            Defaults to {"flowDirection": "Input"}.
+
+    Returns:
+        Dataset: Fusion InputDataFlow class.
+
+    Examples:
+        >>> from fusion import Fusion
+        >>> fusion = Fusion()
+        >>> dataset = fusion.input_dataflow(identifier="MY_DATAFLOW")
+
+    Note:
+        See the dataset module for more information on functionalities of input dataflow objects.
+
+    """
+    flow_details = {"flowDirection": "Input"} if flow_details is None else flow_details
+    dataflow_obj = InputDataFlow(
+        identifier=identifier,
+        title=title,
+        category=category,
+        description=description,
+        frequency=frequency,
+        is_internal_only_dataset=is_internal_only_dataset,
+        is_third_party_data=is_third_party_data,
+        is_restricted=is_restricted,
+        is_raw_data=is_raw_data,
+        maintainer=maintainer,
+        source=source,
+        region=region,
+        publisher=publisher,
+        product=product,
+        sub_category=sub_category,
+        tags=tags,
+        created_date=created_date,
+        modified_date=modified_date,
+        delivery_channel=delivery_channel,
+        language=language,
+        status=status,
+        type_=type_,
+        container_type=container_type,
+        snowflake=snowflake,
+        complexity=complexity,
+        is_immutable=is_immutable,
+        is_mnpi=is_mnpi,
+        is_pci=is_pci,
+        is_pii=is_pii,
+        is_client=is_client,
+        is_public=is_public,
+        is_internal=is_internal,
+        is_confidential=is_confidential,
+        is_highly_confidential=is_highly_confidential,
+        is_active=is_active,
+        owners=owners,
+        application_id=application_id,
+        producer_application_id=producer_application_id,
+        consumer_application_id=consumer_application_id,
+        flow_details=flow_details,
+        **kwargs,
+    )
+    dataflow_obj.client = self
+    return dataflow_obj
+
+
+
+ +
+ +
+ + +

+ list_catalogs(output=False) + +

+ + +
+ +

Lists the catalogs available to the API account.

+ + +

Parameters:

+ + + + + + + + + + + + + + + + + +
NameTypeDescriptionDefault
output + bool + +
+

If True then print the dataframe. Defaults to False.

+
+
+ False +
+ + +

Returns:

+ + + + + + + + + + + + + +
Name TypeDescription
class + DataFrame + +
+

pandas.DataFrame: A dataframe with a row for each catalog

+
+
+ +
+ Source code in py_src/fusion/fusion.py +
223
+224
+225
+226
+227
+228
+229
+230
+231
+232
+233
+234
+235
+236
+237
+238
def list_catalogs(self, output: bool = False) -> pd.DataFrame:
+    """Lists the catalogs available to the API account.
+
+    Args:
+        output (bool, optional): If True then print the dataframe. Defaults to False.
+
+    Returns:
+        class:`pandas.DataFrame`: A dataframe with a row for each catalog
+    """
+    url = f"{self.root_url}catalogs/"
+    cat_df = Fusion._call_for_dataframe(url, self.session)
+
+    if output:
+        pass
+
+    return cat_df
+
+
+
+ +
+ +
+ + +

+ list_dataset_attributes(dataset, catalog=None, output=False, display_all_columns=False) + +

+ + +
+ +

Returns the list of attributes that are in the dataset.

+ + +

Parameters:

+ + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + +
NameTypeDescriptionDefault
dataset + str + +
+

A dataset identifier

+
+
+ required +
catalog + str + +
+

A catalog identifier. Defaults to 'common'.

+
+
+ None +
output + bool + +
+

If True then print the dataframe. Defaults to False.

+
+
+ False +
display_all_columns + bool + +
+

If True displays all columns returned by the API, +otherwise only the key columns are displayed

+
+
+ False +
+ + +

Returns:

+ + + + + + + + + + + + + +
Name TypeDescription
class + DataFrame + +
+

pandas.DataFrame: A dataframe with a row for each attribute

+
+
+ +
+ Source code in py_src/fusion/fusion.py +
442
+443
+444
+445
+446
+447
+448
+449
+450
+451
+452
+453
+454
+455
+456
+457
+458
+459
+460
+461
+462
+463
+464
+465
+466
+467
+468
+469
+470
+471
+472
+473
+474
+475
+476
+477
+478
+479
+480
+481
+482
+483
+484
+485
+486
def list_dataset_attributes(
+    self,
+    dataset: str,
+    catalog: str | None = None,
+    output: bool = False,
+    display_all_columns: bool = False,
+) -> pd.DataFrame:
+    """Returns the list of attributes that are in the dataset.
+
+    Args:
+        dataset (str): A dataset identifier
+        catalog (str, optional): A catalog identifier. Defaults to 'common'.
+        output (bool, optional): If True then print the dataframe. Defaults to False.
+        display_all_columns (bool, optional): If True displays all columns returned by the API,
+            otherwise only the key columns are displayed
+
+    Returns:
+        class:`pandas.DataFrame`: A dataframe with a row for each attribute
+    """
+    catalog = self._use_catalog(catalog)
+
+    url = f"{self.root_url}catalogs/{catalog}/datasets/{dataset}/attributes"
+    ds_attr_df = Fusion._call_for_dataframe(url, self.session)
+
+    if "index" in ds_attr_df.columns: 
+        ds_attr_df = ds_attr_df.sort_values(by="index").reset_index(drop=True)
+
+    if not display_all_columns:
+        ds_attr_df = ds_attr_df[
+            ds_attr_df.columns.intersection(
+                [
+                    "identifier",
+                    "title",
+                    "dataType",
+                    "isDatasetKey",
+                    "description",
+                    "source",
+                ]
+            )
+        ]
+
+    if output:
+        pass
+
+    return ds_attr_df
+
+
+
+ +
+ +
+ + +

+ list_dataset_lineage(dataset_id, catalog=None, output=False, max_results=-1) + +

+ + +
+ +

List the upstream and downstream lineage of the dataset.

+ + +

Parameters:

+ + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + +
NameTypeDescriptionDefault
dataset + str + +
+

A dataset identifier

+
+
+ required +
catalog + str + +
+

A catalog identifier. Defaults to 'common'.

+
+
+ None +
output + bool + +
+

If True then print the dataframe. Defaults to False.

+
+
+ False +
max_results + int + +
+

Limit the number of rows returned in the dataframe. +Defaults to -1 which returns all results.

+
+
+ -1 +
+ + +

Returns:

+ + + + + + + + + + + + + +
Name TypeDescription
class + DataFrame + +
+

pandas.DataFrame: A dataframe with a row for each resource

+
+
+ + +

Raises:

+ + + + + + + + + + + + + +
TypeDescription
+ HTTPError + +
+

If the dataset is not found in the catalog.

+
+
+ +
+ Source code in py_src/fusion/fusion.py +
1358
+1359
+1360
+1361
+1362
+1363
+1364
+1365
+1366
+1367
+1368
+1369
+1370
+1371
+1372
+1373
+1374
+1375
+1376
+1377
+1378
+1379
+1380
+1381
+1382
+1383
+1384
+1385
+1386
+1387
+1388
+1389
+1390
+1391
+1392
+1393
+1394
+1395
+1396
+1397
+1398
+1399
+1400
+1401
+1402
+1403
+1404
+1405
+1406
+1407
+1408
+1409
+1410
+1411
+1412
+1413
+1414
+1415
+1416
+1417
+1418
+1419
+1420
+1421
+1422
+1423
+1424
+1425
+1426
+1427
+1428
+1429
+1430
+1431
+1432
+1433
+1434
+1435
+1436
+1437
+1438
+1439
+1440
+1441
+1442
+1443
+1444
+1445
+1446
+1447
+1448
+1449
+1450
+1451
def list_dataset_lineage(
+    self,
+    dataset_id: str,
+    catalog: str | None = None,
+    output: bool = False,
+    max_results: int = -1,
+) -> pd.DataFrame:
+    """List the upstream and downstream lineage of the dataset.
+
+    Args:
+        dataset (str): A dataset identifier
+        catalog (str, optional): A catalog identifier. Defaults to 'common'.
+        output (bool, optional): If True then print the dataframe. Defaults to False.
+        max_results (int, optional): Limit the number of rows returned in the dataframe.
+            Defaults to -1 which returns all results.
+
+    Returns:
+        class:`pandas.DataFrame`: A dataframe with a row for each resource
+
+    Raises:
+        HTTPError: If the dataset is not found in the catalog.
+
+    """
+    catalog = self._use_catalog(catalog)
+
+    url_dataset = f"{self.root_url}catalogs/{catalog}/datasets/{dataset_id}"
+    resp_dataset = self.session.get(url_dataset)
+    resp_dataset.raise_for_status()
+
+    url = f"{self.root_url}catalogs/{catalog}/datasets/{dataset_id}/lineage"
+    resp = self.session.get(url)
+    data = resp.json()
+    relations_data = data["relations"]
+
+    restricted_datasets = [
+        dataset_metadata["identifier"]
+        for dataset_metadata in data["datasets"]
+        if dataset_metadata.get("status", None) == "Restricted"
+    ]
+
+    data_dict = {}
+
+    for entry in relations_data:
+        source_dataset_id = entry["source"]["dataset"]
+        source_catalog = entry["source"]["catalog"]
+        destination_dataset_id = entry["destination"]["dataset"]
+        destination_catalog = entry["destination"]["catalog"]
+
+        if destination_dataset_id == dataset_id:
+            for dataset in data["datasets"]:
+                if dataset["identifier"] == source_dataset_id and dataset.get("status", None) != "Restricted":
+                    source_dataset_title = dataset["title"]
+                elif dataset["identifier"] == source_dataset_id and dataset.get("status", None) == "Restricted":
+                    source_dataset_title = "Access Restricted"
+            data_dict[source_dataset_id] = (
+                "source",
+                source_catalog,
+                source_dataset_title,
+            )
+
+        if source_dataset_id == dataset_id:
+            for dataset in data["datasets"]:
+                if dataset["identifier"] == destination_dataset_id and dataset.get("status", None) != "Restricted":
+                    destination_dataset_title = dataset["title"]
+                elif (
+                    dataset["identifier"] == destination_dataset_id and dataset.get("status", None) == "Restricted"
+                ):
+                    destination_dataset_title = "Access Restricted"
+            data_dict[destination_dataset_id] = (
+                "produced",
+                destination_catalog,
+                destination_dataset_title,
+            )
+
+    output_data = {
+        "type": [v[0] for v in data_dict.values()],
+        "dataset_identifier": list(data_dict.keys()),
+        "title": [v[2] for v in data_dict.values()],
+        "catalog": [v[1] for v in data_dict.values()],
+    }
+
+    lineage_df = pd.DataFrame(output_data)
+    lineage_df.loc[
+        lineage_df["dataset_identifier"].isin(restricted_datasets),
+        ["dataset_identifier", "catalog", "title"],
+    ] = "Access Restricted"
+
+    if max_results > -1:
+        lineage_df = lineage_df[0:max_results]
+
+    if output:
+        pass
+
+    return lineage_df
+
+
+
+ +
+ +
+ + +

+ list_datasetmembers(dataset, catalog=None, output=False, max_results=-1) + +

+ + +
+ +

List the available members in the dataset series.

+ + +

Parameters:

+ + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + +
NameTypeDescriptionDefault
dataset + str + +
+

A dataset identifier

+
+
+ required +
catalog + str + +
+

A catalog identifier. Defaults to 'common'.

+
+
+ None +
output + bool + +
+

If True then print the dataframe. Defaults to False.

+
+
+ False +
max_results + int + +
+

Limit the number of rows returned in the dataframe. +Defaults to -1 which returns all results.

+
+
+ -1 +
+ + +

Returns:

+ + + + + + + + + + + + + +
Name TypeDescription
class + DataFrame + +
+

pandas.DataFrame: a dataframe with a row for each dataset member.

+
+
+ +
+ Source code in py_src/fusion/fusion.py +
488
+489
+490
+491
+492
+493
+494
+495
+496
+497
+498
+499
+500
+501
+502
+503
+504
+505
+506
+507
+508
+509
+510
+511
+512
+513
+514
+515
+516
+517
+518
def list_datasetmembers(
+    self,
+    dataset: str,
+    catalog: str | None = None,
+    output: bool = False,
+    max_results: int = -1,
+) -> pd.DataFrame:
+    """List the available members in the dataset series.
+
+    Args:
+        dataset (str): A dataset identifier
+        catalog (str, optional): A catalog identifier. Defaults to 'common'.
+        output (bool, optional): If True then print the dataframe. Defaults to False.
+        max_results (int, optional): Limit the number of rows returned in the dataframe.
+            Defaults to -1 which returns all results.
+
+    Returns:
+        class:`pandas.DataFrame`: a dataframe with a row for each dataset member.
+    """
+    catalog = self._use_catalog(catalog)
+
+    url = f"{self.root_url}catalogs/{catalog}/datasets/{dataset}/datasetseries"
+    ds_members_df = Fusion._call_for_dataframe(url, self.session)
+
+    if max_results > -1:
+        ds_members_df = ds_members_df[0:max_results]
+
+    if output:
+        pass
+
+    return ds_members_df
+
+
+
+ +
+ +
+ + +

+ list_datasets(contains=None, id_contains=False, product=None, catalog=None, output=False, max_results=-1, display_all_columns=False, status=None, dataset_type=None) + +

+ + +
+ +

Get the datasets contained in a catalog.

+ + +

Parameters:

+ + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + +
NameTypeDescriptionDefault
contains + Union[str, list] + +
+

A string or a list of strings that are dataset +identifiers to filter the datasets list. If a list is provided then it will return +datasets whose identifier matches any of the strings. Defaults to None.

+
+
+ None +
id_contains + bool + +
+

Filter datasets only where the string(s) are contained in the identifier, +ignoring description.

+
+
+ False +
product + Union[str, list] + +
+

A string or a list of strings that are product +identifiers to filter the datasets list. Defaults to None.

+
+
+ None +
catalog + str + +
+

A catalog identifier. Defaults to 'common'.

+
+
+ None +
output + bool + +
+

If True then print the dataframe. Defaults to False.

+
+
+ False +
max_results + int + +
+

Limit the number of rows returned in the dataframe. +Defaults to -1 which returns all results.

+
+
+ -1 +
display_all_columns + bool + +
+

If True displays all columns returned by the API, +otherwise only the key columns are displayed

+
+
+ False +
status + str + +
+

filter the datasets by status, default is to show all results.

+
+
+ None +
dataset_type + str + +
+

filter the datasets by type, default is to show all results.

+
+
+ None +
+ + +

Returns:

+ + + + + + + + + + + + + +
Name TypeDescription
class + DataFrame + +
+

pandas.DataFrame: a dataframe with a row for each dataset.

+
+
+ +
+ Source code in py_src/fusion/fusion.py +
329
+330
+331
+332
+333
+334
+335
+336
+337
+338
+339
+340
+341
+342
+343
+344
+345
+346
+347
+348
+349
+350
+351
+352
+353
+354
+355
+356
+357
+358
+359
+360
+361
+362
+363
+364
+365
+366
+367
+368
+369
+370
+371
+372
+373
+374
+375
+376
+377
+378
+379
+380
+381
+382
+383
+384
+385
+386
+387
+388
+389
+390
+391
+392
+393
+394
+395
+396
+397
+398
+399
+400
+401
+402
+403
+404
+405
+406
+407
+408
+409
+410
+411
+412
+413
+414
+415
+416
+417
+418
+419
def list_datasets(  # noqa: PLR0913
+    self,
+    contains: str | list[str] | None = None,
+    id_contains: bool = False,
+    product: str | list[str] | None = None,
+    catalog: str | None = None,
+    output: bool = False,
+    max_results: int = -1,
+    display_all_columns: bool = False,
+    status: str | None = None,
+    dataset_type: str | None = None,
+) -> pd.DataFrame:
+    """Get the datasets contained in a catalog.
+
+    Args:
+        contains (Union[str, list], optional): A string or a list of strings that are dataset
+            identifiers to filter the datasets list. If a list is provided then it will return
+            datasets whose identifier matches any of the strings. Defaults to None.
+        id_contains (bool): Filter datasets only where the string(s) are contained in the identifier,
+            ignoring description.
+        product (Union[str, list], optional): A string or a list of strings that are product
+            identifiers to filter the datasets list. Defaults to None.
+        catalog (str, optional): A catalog identifier. Defaults to 'common'.
+        output (bool, optional): If True then print the dataframe. Defaults to False.
+        max_results (int, optional): Limit the number of rows returned in the dataframe.
+            Defaults to -1 which returns all results.
+        display_all_columns (bool, optional): If True displays all columns returned by the API,
+            otherwise only the key columns are displayed
+        status (str, optional): filter the datasets by status, default is to show all results.
+        dataset_type (str, optional): filter the datasets by type, default is to show all results.
+
+    Returns:
+        class:`pandas.DataFrame`: a dataframe with a row for each dataset.
+    """
+    catalog = self._use_catalog(catalog)
+
+    url = f"{self.root_url}catalogs/{catalog}/datasets"
+    ds_df = Fusion._call_for_dataframe(url, self.session)
+
+    if contains:
+        if isinstance(contains, list):
+            contains = "|".join(f"{s}" for s in contains)
+        if id_contains:
+            ds_df = ds_df[ds_df["identifier"].str.contains(contains, case=False)]
+        else:
+            ds_df = ds_df[
+                ds_df["identifier"].str.contains(contains, case=False)
+                | ds_df["description"].str.contains(contains, case=False)
+            ]
+
+    if product:
+        url = f"{self.root_url}catalogs/{catalog}/productDatasets"
+        prd_df = Fusion._call_for_dataframe(url, self.session)
+        prd_df = (
+            prd_df[prd_df["product"] == product]
+            if isinstance(product, str)
+            else prd_df[prd_df["product"].isin(product)]
+        )
+        ds_df = ds_df[ds_df["identifier"].str.lower().isin(prd_df["dataset"].str.lower())].reset_index(drop=True)
+
+    if max_results > -1:
+        ds_df = ds_df[0:max_results]
+
+    ds_df["category"] = ds_df.category.str.join(", ")
+    ds_df["region"] = ds_df.region.str.join(", ")
+    if not display_all_columns:
+        cols = [
+            "identifier",
+            "title",
+            "containerType",
+            "region",
+            "category",
+            "coverageStartDate",
+            "coverageEndDate",
+            "description",
+            "status",
+            "type",
+        ]
+        cols = [c for c in cols if c in ds_df.columns]
+        ds_df = ds_df[cols]
+
+    if status is not None:
+        ds_df = ds_df[ds_df["status"] == status]
+
+    if dataset_type is not None:
+        ds_df = ds_df[ds_df["type"] == dataset_type]
+
+    if output:
+        pass
+
+    return ds_df
+
+
+
+ +
+ +
+ + +

+ list_distributions(dataset, series, catalog=None, output=False) + +

+ + +
+ +

List the available distributions (downloadable instances of the dataset with a format type).

+ + +

Parameters:

+ + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + +
NameTypeDescriptionDefault
dataset + str + +
+

A dataset identifier

+
+
+ required +
series + str + +
+

The datasetseries identifier

+
+
+ required +
catalog + str + +
+

A catalog identifier. Defaults to 'common'.

+
+
+ None +
output + bool + +
+

If True then print the dataframe. Defaults to False.

+
+
+ False +
+ + +

Returns:

+ + + + + + + + + + + + + +
Name TypeDescription
class + DataFrame + +
+

pandas.DataFrame: A dataframe with a row for each distribution.

+
+
+ +
+ Source code in py_src/fusion/fusion.py +
549
+550
+551
+552
+553
+554
+555
+556
+557
+558
+559
+560
+561
+562
+563
+564
+565
+566
+567
+568
+569
+570
+571
+572
+573
+574
+575
def list_distributions(
+    self,
+    dataset: str,
+    series: str,
+    catalog: str | None = None,
+    output: bool = False,
+) -> pd.DataFrame:
+    """List the available distributions (downloadable instances of the dataset with a format type).
+
+    Args:
+        dataset (str): A dataset identifier
+        series (str): The datasetseries identifier
+        catalog (str, optional): A catalog identifier. Defaults to 'common'.
+        output (bool, optional): If True then print the dataframe. Defaults to False.
+
+    Returns:
+        class:`pandas.DataFrame`: A dataframe with a row for each distribution.
+    """
+    catalog = self._use_catalog(catalog)
+
+    url = f"{self.root_url}catalogs/{catalog}/datasets/{dataset}/datasetseries/{series}/distributions"
+    distros_df = Fusion._call_for_dataframe(url, self.session)
+
+    if output:
+        pass
+
+    return distros_df
+
+
+
+ +
+ +
+ + +

+ list_product_dataset_mapping(dataset=None, product=None, catalog=None) + +

+ + +
+ +

get the product to dataset linking contained in a catalog. A product is a grouping of datasets.

+ + +

Parameters:

+ + + + + + + + + + + + + + + + + + + + + + + + + + + + + +
NameTypeDescriptionDefault
dataset + str | list[str] | None + +
+

A string or list of strings that are dataset

+
+
+ None +
product + str | list[str] | None + +
+

A string or list of strings that are product

+
+
+ None +
catalog + str | None + +
+

A catalog identifier. Defaults to 'common'.

+
+
+ None +
+ + +

Returns:

+ + + + + + + + + + + + + +
TypeDescription
+ DataFrame + +
+

pd.DataFrame: a dataframe with a row for each dataset to product mapping.

+
+
+ +
+ Source code in py_src/fusion/fusion.py +
1507
+1508
+1509
+1510
+1511
+1512
+1513
+1514
+1515
+1516
+1517
+1518
+1519
+1520
+1521
+1522
+1523
+1524
+1525
+1526
+1527
+1528
+1529
+1530
+1531
+1532
+1533
+1534
+1535
+1536
+1537
+1538
+1539
+1540
+1541
+1542
+1543
def list_product_dataset_mapping(
+    self,
+    dataset: str | list[str] | None = None,
+    product: str | list[str] | None = None,
+    catalog: str | None = None,
+) -> pd.DataFrame:
+    """get the product to dataset linking contained in  a catalog. A product is a grouping of datasets.
+
+    Args:
+        dataset (str | list[str] | None, optional): A string or list of strings that are dataset
+        identifiers to filter the output. If a list is provided then it will return
+        datasets whose identifier matches any of the strings. Defaults to None.
+        product (str | list[str] | None, optional): A string or list of strings that are product
+        identifiers to filter the output. If a list is provided then it will return
+        products whose identifier matches any of the strings. Defaults to None.
+        catalog (str | None, optional): A catalog identifier. Defaults to 'common'.
+
+    Returns:
+        pd.DataFrame: a dataframe with a row  for each dataset to product mapping.
+    """
+    catalog = self._use_catalog(catalog)
+    url = f"{self.root_url}catalogs/{catalog}/productDatasets"
+    mapping_df = pd.DataFrame(self._call_for_dataframe(url, self.session))
+
+    if dataset:
+        if isinstance(dataset, list):
+            contains = "|".join(f"{s}" for s in dataset)
+            mapping_df = mapping_df[mapping_df["dataset"].str.contains(contains, case=False)]
+        if isinstance(dataset, str):
+            mapping_df = mapping_df[mapping_df["dataset"].str.contains(dataset, case=False)]
+    if product:
+        if isinstance(product, list):
+            contains = "|".join(f"{s}" for s in product)
+            mapping_df = mapping_df[mapping_df["product"].str.contains(contains, case=False)]
+        if isinstance(product, str):
+            mapping_df = mapping_df[mapping_df["product"].str.contains(product, case=False)]
+    return mapping_df
+
+
+
+ +
+ +
+ + +

+ list_products(contains=None, id_contains=False, catalog=None, output=False, max_results=-1, display_all_columns=False) + +

+ + +
+ +

Get the products contained in a catalog. A product is a grouping of datasets.

+ + +

Parameters:

+ + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + +
NameTypeDescriptionDefault
contains + Union[str, list] + +
+

A string or a list of strings that are product +identifiers to filter the products list. If a list is provided then it will return +products whose identifier matches any of the strings. Defaults to None.

+
+
+ None +
id_contains + bool + +
+

Filter datasets only where the string(s) are contained in the identifier, +ignoring description.

+
+
+ False +
catalog + str + +
+

A catalog identifier. Defaults to 'common'.

+
+
+ None +
output + bool + +
+

If True then print the dataframe. Defaults to False.

+
+
+ False +
max_results + int + +
+

Limit the number of rows returned in the dataframe. +Defaults to -1 which returns all results.

+
+
+ -1 +
display_all_columns + bool + +
+

If True displays all columns returned by the API, +otherwise only the key columns are displayed

+
+
+ False +
+ + +

Returns:

+ + + + + + + + + + + + + +
Name TypeDescription
class + DataFrame + +
+

pandas.DataFrame: a dataframe with a row for each product

+
+
+ +
+ Source code in py_src/fusion/fusion.py +
260
+261
+262
+263
+264
+265
+266
+267
+268
+269
+270
+271
+272
+273
+274
+275
+276
+277
+278
+279
+280
+281
+282
+283
+284
+285
+286
+287
+288
+289
+290
+291
+292
+293
+294
+295
+296
+297
+298
+299
+300
+301
+302
+303
+304
+305
+306
+307
+308
+309
+310
+311
+312
+313
+314
+315
+316
+317
+318
+319
+320
+321
+322
+323
+324
+325
+326
+327
def list_products(
+    self,
+    contains: str | list[str] | None = None,
+    id_contains: bool = False,
+    catalog: str | None = None,
+    output: bool = False,
+    max_results: int = -1,
+    display_all_columns: bool = False,
+) -> pd.DataFrame:
+    """Get the products contained in a catalog. A product is a grouping of datasets.
+
+    Args:
+        contains (Union[str, list], optional): A string or a list of strings that are product
+            identifiers to filter the products list. If a list is provided then it will return
+            products whose identifier matches any of the strings. Defaults to None.
+        id_contains (bool): Filter datasets only where the string(s) are contained in the identifier,
+            ignoring description.
+        catalog (str, optional): A catalog identifier. Defaults to 'common'.
+        output (bool, optional): If True then print the dataframe. Defaults to False.
+        max_results (int, optional): Limit the number of rows returned in the dataframe.
+            Defaults to -1 which returns all results.
+        display_all_columns (bool, optional): If True displays all columns returned by the API,
+            otherwise only the key columns are displayed
+
+    Returns:
+        class:`pandas.DataFrame`: a dataframe with a row for each product
+    """
+    catalog = self._use_catalog(catalog)
+
+    url = f"{self.root_url}catalogs/{catalog}/products"
+    full_prod_df: pd.DataFrame = Fusion._call_for_dataframe(url, self.session)
+
+    if contains:
+        if isinstance(contains, list):
+            contains = "|".join(f"{s}" for s in contains)
+        if id_contains:
+            filtered_df = full_prod_df[full_prod_df["identifier"].str.contains(contains, case=False)]
+        else:
+            filtered_df = full_prod_df[
+                full_prod_df["identifier"].str.contains(contains, case=False)
+                | full_prod_df["description"].str.contains(contains, case=False)
+            ]
+    else:
+        filtered_df = full_prod_df
+
+    filtered_df["category"] = filtered_df.category.str.join(", ")
+    filtered_df["region"] = filtered_df.region.str.join(", ")
+    if not display_all_columns:
+        filtered_df = filtered_df[
+            filtered_df.columns.intersection(
+                [
+                    "identifier",
+                    "title",
+                    "region",
+                    "category",
+                    "status",
+                    "description",
+                ]
+            )
+        ]
+
+    if max_results > -1:
+        filtered_df = filtered_df[0:max_results]
+
+    if output:
+        pass
+
+    return filtered_df
+
+
+
+ +
+ +
+ + +

+ list_registered_attributes(catalog=None, output=False, display_all_columns=False) + +

+ + +
+ +

Returns the list of attributes in a catalog.

+ + +

Parameters:

+ + + + + + + + + + + + + + + + + + + + + + + + + + + + + +
NameTypeDescriptionDefault
catalog + str + +
+

A catalog identifier. Defaults to 'common'.

+
+
+ None +
output + bool + +
+

If True then print the dataframe. Defaults to False.

+
+
+ False +
display_all_columns + bool + +
+

If True displays all columns returned by the API, +otherwise only the key columns are displayed

+
+
+ False +
+ + +

Returns:

+ + + + + + + + + + + + + +
Name TypeDescription
class + DataFrame + +
+

pandas.DataFrame: A dataframe with a row for each attribute

+
+
+ +
+ Source code in py_src/fusion/fusion.py +
1954
+1955
+1956
+1957
+1958
+1959
+1960
+1961
+1962
+1963
+1964
+1965
+1966
+1967
+1968
+1969
+1970
+1971
+1972
+1973
+1974
+1975
+1976
+1977
+1978
+1979
+1980
+1981
+1982
+1983
+1984
+1985
+1986
+1987
+1988
+1989
+1990
+1991
+1992
+1993
def list_registered_attributes(
+    self,
+    catalog: str | None = None,
+    output: bool = False,
+    display_all_columns: bool = False,
+) -> pd.DataFrame:
+    """Returns the list of attributes in a catalog.
+
+    Args:
+        catalog (str, optional): A catalog identifier. Defaults to 'common'.
+        output (bool, optional): If True then print the dataframe. Defaults to False.
+        display_all_columns (bool, optional): If True displays all columns returned by the API,
+            otherwise only the key columns are displayed
+
+    Returns:
+        class:`pandas.DataFrame`: A dataframe with a row for each attribute
+    """
+    catalog = self._use_catalog(catalog)
+
+    url = f"{self.root_url}catalogs/{catalog}/attributes"
+    ds_attr_df = Fusion._call_for_dataframe(url, self.session).reset_index(drop=True)
+
+    if not display_all_columns:
+        ds_attr_df = ds_attr_df[
+            ds_attr_df.columns.intersection(
+                [
+                    "identifier",
+                    "title",
+                    "dataType",
+                    "description",
+                    "publisher",
+                    "applicationId",
+                ]
+            )
+        ]
+
+    if output:
+        pass
+
+    return ds_attr_df
+
+
+
+ +
+ +
+ + +

+ listen_to_events(last_event_id=None, catalog=None, url='https://fusion.jpmorgan.com/api/v1/') + +

+ + +
+ +

Run server sent event listener in the background. Retrieve results by running get_events.

+ + +

Parameters:

+ + + + + + + + + + + + + + + + + + + + + + + + + + + + + +
NameTypeDescriptionDefault
last_event_id + str + +
+

Last event ID (exclusive).

+
+
+ None +
catalog + str + +
+

catalog.

+
+
+ None +
url + str + +
+

subscription url.

+
+
+ 'https://fusion.jpmorgan.com/api/v1/' +
+ + +
+ Source code in py_src/fusion/fusion.py +
1237
+1238
+1239
+1240
+1241
+1242
+1243
+1244
+1245
+1246
+1247
+1248
+1249
+1250
+1251
+1252
+1253
+1254
+1255
+1256
+1257
+1258
+1259
+1260
+1261
+1262
+1263
+1264
+1265
+1266
+1267
+1268
+1269
+1270
+1271
+1272
+1273
+1274
+1275
+1276
+1277
+1278
+1279
+1280
+1281
+1282
+1283
+1284
+1285
+1286
+1287
+1288
+1289
+1290
+1291
+1292
+1293
+1294
+1295
+1296
+1297
+1298
+1299
+1300
+1301
+1302
+1303
+1304
+1305
+1306
+1307
def listen_to_events(
+    self,
+    last_event_id: str | None = None,
+    catalog: str | None = None,
+    url: str = "https://fusion.jpmorgan.com/api/v1/",
+) -> None | pd.DataFrame:
+    """Run server sent event listener in the background. Retrieve results by running get_events.
+
+    Args:
+        last_event_id (str): Last event ID (exclusive).
+        catalog (str): catalog.
+        url (str): subscription url.
+    Returns:
+        Union[None, class:`pandas.DataFrame`]: If in_background is True then the function returns no output.
+            If in_background is set to False then pandas DataFrame is output upon keyboard termination.
+    """
+
+    catalog = self._use_catalog(catalog)
+    import asyncio
+    import json
+    import threading
+
+    from aiohttp_sse_client import client as sse_client
+
+    from .utils import get_client
+
+    kwargs: dict[str, Any] = {}
+    if last_event_id:
+        kwargs = {"headers": {"Last-Event-ID": last_event_id}}
+
+    async def async_events() -> None:
+        """Events sync function.
+
+        Returns:
+            None
+        """
+        timeout = 1e100
+        session = await get_client(self.credentials, timeout=timeout)
+        async with sse_client.EventSource(
+            f"{url}catalogs/{catalog}/notifications/subscribe",
+            session=session,
+            **kwargs,
+        ) as messages:
+            lst = []
+            try:
+                async for msg in messages:
+                    event = json.loads(msg.data)
+                    lst.append(event)
+                    if self.events is None:
+                        self.events = pd.DataFrame()
+                    else:
+                        self.events = pd.concat([self.events, pd.DataFrame(lst)], ignore_index=True)
+            except TimeoutError as ex:
+                raise ex from None
+            except BaseException:
+                raise
+
+    _ = self.list_catalogs()  # refresh token
+    if "headers" in kwargs:
+        kwargs["headers"].update({"authorization": f"bearer {self.credentials.bearer_token}"})
+    else:
+        kwargs["headers"] = {
+            "authorization": f"bearer {self.credentials.bearer_token}",
+        }
+    if "http" in self.credentials.proxies:
+        kwargs["proxy"] = self.credentials.proxies["http"]
+    elif "https" in self.credentials.proxies:
+        kwargs["proxy"] = self.credentials.proxies["https"]
+    th = threading.Thread(target=asyncio.run, args=(async_events(),), daemon=True)
+    th.start()
+    return None
+
+
+
+ +
+ +
+ + +

+ output_dataflow(identifier, title='', category=None, description='', frequency='Once', is_internal_only_dataset=False, is_third_party_data=True, is_restricted=None, is_raw_data=True, maintainer='J.P. Morgan Fusion', source=None, region=None, publisher='J.P. Morgan', product=None, sub_category=None, tags=None, created_date=None, modified_date=None, delivery_channel='API', language='English', status='Available', type_='Flow', container_type='Snapshot-Full', snowflake=None, complexity=None, is_immutable=None, is_mnpi=None, is_pci=None, is_pii=None, is_client=None, is_public=None, is_internal=None, is_confidential=None, is_highly_confidential=None, is_active=None, owners=None, application_id=None, producer_application_id=None, consumer_application_id=None, flow_details=None, **kwargs) + +

+ + +
+ +

Instantiate an Output Dataflow object with this client for metadata creation.

+ + +

Parameters:

+ + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + +
NameTypeDescriptionDefault
identifier + str + +
+

Dataset identifier.

+
+
+ required +
title + str + +
+

Dataset title. If not provided, defaults to identifier.

+
+
+ '' +
category + str | list[str] | None + +
+

A category or list of categories for the dataset.

+
+
+ None +
description + str + +
+

Dataset description. If not provided, defaults to identifier.

+
+
+ '' +
frequency + str + +
+

The frequency of the dataset. Defaults to "Once".

+
+
+ 'Once' +
is_internal_only_dataset + bool + +
+

Flag for internal datasets. Defaults to False.

+
+
+ False +
is_third_party_data + bool + +
+

Flag for third party data. Defaults to True.

+
+
+ True +
is_restricted + bool | None + +
+

Flag for restricted datasets. Defaults to None.

+
+
+ None +
is_raw_data + bool + +
+

Flag for raw datasets. Defaults to True.

+
+
+ True +
maintainer + str | None + +
+

Dataset maintainer. Defaults to "J.P. Morgan Fusion".

+
+
+ 'J.P. Morgan Fusion' +
source + str | list[str] | None + +
+

Name of data vendor which provided the data. Defaults to None.

+
+
+ None +
region + str | list[str] | None + +
+

Region. Defaults to None.

+
+
+ None +
publisher + str + +
+

Name of vendor that publishes the data. Defaults to "J.P. Morgan".

+
+
+ 'J.P. Morgan' +
product + str | list[str] | None + +
+

Product to associate dataset with. Defaults to None.

+
+
+ None +
sub_category + str | list[str] | None + +
+

Sub-category. Defaults to None.

+
+
+ None +
tags + str | list[str] | None + +
+

Tags used for search purposes. Defaults to None.

+
+
+ None +
created_date + str | None + +
+

Created date. Defaults to None.

+
+
+ None +
modified_date + str | None + +
+

Modified date. Defaults to None.

+
+
+ None +
delivery_channel + str | list[str] + +
+

Delivery channel. Defaults to "API".

+
+
+ 'API' +
language + str + +
+

Language. Defaults to "English".

+
+
+ 'English' +
status + str + +
+

Status. Defaults to "Available".

+
+
+ 'Available' +
type_ + str | None + +
+

Dataset type. Defaults to "Flow".

+
+
+ 'Flow' +
container_type + str | None + +
+

Container type. Defaults to "Snapshot-Full".

+
+
+ 'Snapshot-Full' +
snowflake + str | None + +
+

Snowflake account connection. Defaults to None.

+
+
+ None +
complexity + str | None + +
+

Complexity. Defaults to None.

+
+
+ None +
is_immutable + bool | None + +
+

Flag for immutable datasets. Defaults to None.

+
+
+ None +
is_mnpi + bool | None + +
+

is_mnpi. Defaults to None.

+
+
+ None +
is_pci + bool | None + +
+

is_pci. Defaults to None.

+
+
+ None +
is_pii + bool | None + +
+

is_pii. Defaults to None.

+
+
+ None +
is_client + bool | None + +
+

is_client. Defaults to None.

+
+
+ None +
is_public + bool | None + +
+

is_public. Defaults to None.

+
+
+ None +
is_internal + bool | None + +
+

is_internal. Defaults to None.

+
+
+ None +
is_confidential + bool | None + +
+

is_confidential. Defaults to None.

+
+
+ None +
is_highly_confidential + bool | None + +
+

is_highly_confidential. Defaults to None.

+
+
+ None +
is_active + bool | None + +
+

is_active. Defaults to None.

+
+
+ None +
owners + list[str] | None + +
+

The owners of the dataset. Defaults to None.

+
+
+ None +
application_id + str | None + +
+

The application ID of the dataset. Defaults to None.

+
+
+ None +
producer_application_id + dict[str, str] | None + +
+

The producer application ID (upstream application +producing the flow).

+
+
+ None +
consumer_application_id + list[dict[str, str]] | dict[str, str] | None + +
+

The consumer application +ID (downstream application, consuming the flow).

+
+
+ None +
flow_details + dict[str, str] | None + +
+

The flow details. Specifies input versus output flow. +Defaults to {"flowDirection": "Output"}.

+
+
+ None +
+ + +

Returns:

+ + + + + + + + + + + + + +
Name TypeDescription
Dataset + OutputDataFlow + +
+

Fusion OutputDataFlow class.

+
+
+ + +

Examples:

+
>>> from fusion import Fusion
+>>> fusion = Fusion()
+>>> dataset = fusion.output_dataflow(identifier="MY_DATAFLOW")
+
+ + +
+ Note +

See the dataset module for more information on functionalities of output dataflow objects.

+
+
+ Source code in py_src/fusion/fusion.py +
2288
+2289
+2290
+2291
+2292
+2293
+2294
+2295
+2296
+2297
+2298
+2299
+2300
+2301
+2302
+2303
+2304
+2305
+2306
+2307
+2308
+2309
+2310
+2311
+2312
+2313
+2314
+2315
+2316
+2317
+2318
+2319
+2320
+2321
+2322
+2323
+2324
+2325
+2326
+2327
+2328
+2329
+2330
+2331
+2332
+2333
+2334
+2335
+2336
+2337
+2338
+2339
+2340
+2341
+2342
+2343
+2344
+2345
+2346
+2347
+2348
+2349
+2350
+2351
+2352
+2353
+2354
+2355
+2356
+2357
+2358
+2359
+2360
+2361
+2362
+2363
+2364
+2365
+2366
+2367
+2368
+2369
+2370
+2371
+2372
+2373
+2374
+2375
+2376
+2377
+2378
+2379
+2380
+2381
+2382
+2383
+2384
+2385
+2386
+2387
+2388
+2389
+2390
+2391
+2392
+2393
+2394
+2395
+2396
+2397
+2398
+2399
+2400
+2401
+2402
+2403
+2404
+2405
+2406
+2407
+2408
+2409
+2410
+2411
+2412
+2413
+2414
+2415
+2416
+2417
+2418
+2419
+2420
+2421
+2422
+2423
+2424
+2425
+2426
+2427
+2428
+2429
+2430
+2431
+2432
+2433
+2434
+2435
+2436
+2437
def output_dataflow(  # noqa: PLR0913
+    self,
+    identifier: str,
+    title: str = "",
+    category: str | list[str] | None = None,
+    description: str = "",
+    frequency: str = "Once",
+    is_internal_only_dataset: bool = False,
+    is_third_party_data: bool = True,
+    is_restricted: bool | None = None,
+    is_raw_data: bool = True,
+    maintainer: str | None = "J.P. Morgan Fusion",
+    source: str | list[str] | None = None,
+    region: str | list[str] | None = None,
+    publisher: str = "J.P. Morgan",
+    product: str | list[str] | None = None,
+    sub_category: str | list[str] | None = None,
+    tags: str | list[str] | None = None,
+    created_date: str | None = None,
+    modified_date: str | None = None,
+    delivery_channel: str | list[str] = "API",
+    language: str = "English",
+    status: str = "Available",
+    type_: str | None = "Flow",
+    container_type: str | None = "Snapshot-Full",
+    snowflake: str | None = None,
+    complexity: str | None = None,
+    is_immutable: bool | None = None,
+    is_mnpi: bool | None = None,
+    is_pci: bool | None = None,
+    is_pii: bool | None = None,
+    is_client: bool | None = None,
+    is_public: bool | None = None,
+    is_internal: bool | None = None,
+    is_confidential: bool | None = None,
+    is_highly_confidential: bool | None = None,
+    is_active: bool | None = None,
+    owners: list[str] | None = None,
+    application_id: str | dict[str, str] | None = None,
+    producer_application_id: dict[str, str] | None = None,
+    consumer_application_id: list[dict[str, str]] | dict[str, str] | None = None,
+    flow_details: dict[str, str] | None = None,
+    **kwargs: Any,
+) -> OutputDataFlow:
+    """Instantiate an Output Dataflow object with this client for metadata creation.
+
+    Args:
+        identifier (str): Dataset identifier.
+        title (str, optional): Dataset title. If not provided, defaults to identifier.
+        category (str | list[str] | None, optional): A category or list of categories for the dataset.
+        Defaults to None.
+        description (str, optional): Dataset description. If not provided, defaults to identifier.
+        frequency (str, optional): The frequency of the dataset. Defaults to "Once".
+        is_internal_only_dataset (bool, optional): Flag for internal datasets. Defaults to False.
+        is_third_party_data (bool, optional): Flag for third party data. Defaults to True.
+        is_restricted (bool | None, optional): Flag for restricted datasets. Defaults to None.
+        is_raw_data (bool, optional): Flag for raw datasets. Defaults to True.
+        maintainer (str | None, optional): Dataset maintainer. Defaults to "J.P. Morgan Fusion".
+        source (str | list[str] | None, optional): Name of data vendor which provided the data. Defaults to None.
+        region (str | list[str] | None, optional): Region. Defaults to None.
+        publisher (str, optional): Name of vendor that publishes the data. Defaults to "J.P. Morgan".
+        product (str | list[str] | None, optional): Product to associate dataset with. Defaults to None.
+        sub_category (str | list[str] | None, optional): Sub-category. Defaults to None.
+        tags (str | list[str] | None, optional): Tags used for search purposes. Defaults to None.
+        created_date (str | None, optional): Created date. Defaults to None.
+        modified_date (str | None, optional): Modified date. Defaults to None.
+        delivery_channel (str | list[str], optional): Delivery channel. Defaults to "API".
+        language (str, optional): Language. Defaults to "English".
+        status (str, optional): Status. Defaults to "Available".
+        type_ (str | None, optional): Dataset type. Defaults to "Flow".
+        container_type (str | None, optional): Container type. Defaults to "Snapshot-Full".
+        snowflake (str | None, optional): Snowflake account connection. Defaults to None.
+        complexity (str | None, optional): Complexity. Defaults to None.
+        is_immutable (bool | None, optional): Flag for immutable datasets. Defaults to None.
+        is_mnpi (bool | None, optional): is_mnpi. Defaults to None.
+        is_pci (bool | None, optional): is_pci. Defaults to None.
+        is_pii (bool | None, optional): is_pii. Defaults to None.
+        is_client (bool | None, optional): is_client. Defaults to None.
+        is_public (bool | None, optional): is_public. Defaults to None.
+        is_internal (bool | None, optional): is_internal. Defaults to None.
+        is_confidential (bool | None, optional): is_confidential. Defaults to None.
+        is_highly_confidential (bool | None, optional): is_highly_confidential. Defaults to None.
+        is_active (bool | None, optional): is_active. Defaults to None.
+        owners (list[str] | None, optional): The owners of the dataset. Defaults to None.
+        application_id (str | None, optional): The application ID of the dataset. Defaults to None.
+        producer_application_id (dict[str, str] | None, optional): The producer application ID (upstream application
+            producing the flow).
+        consumer_application_id (list[dict[str, str]] | dict[str, str] | None, optional): The consumer application 
+            ID (downstream application, consuming the flow).
+        flow_details (dict[str, str] | None, optional): The flow details. Specifies input versus output flow.
+            Defaults to {"flowDirection": "Output"}.
+
+    Returns:
+        Dataset: Fusion OutputDataFlow class.
+
+    Examples:
+        >>> from fusion import Fusion
+        >>> fusion = Fusion()
+        >>> dataset = fusion.output_dataflow(identifier="MY_DATAFLOW")
+
+    Note:
+        See the dataset module for more information on functionalities of output dataflow objects.
+
+    """
+    flow_details = {"flowDirection": "Output"} if flow_details is None else flow_details
+    dataflow_obj = OutputDataFlow(
+        identifier=identifier,
+        title=title,
+        category=category,
+        description=description,
+        frequency=frequency,
+        is_internal_only_dataset=is_internal_only_dataset,
+        is_third_party_data=is_third_party_data,
+        is_restricted=is_restricted,
+        is_raw_data=is_raw_data,
+        maintainer=maintainer,
+        source=source,
+        region=region,
+        publisher=publisher,
+        product=product,
+        sub_category=sub_category,
+        tags=tags,
+        created_date=created_date,
+        modified_date=modified_date,
+        delivery_channel=delivery_channel,
+        language=language,
+        status=status,
+        type_=type_,
+        container_type=container_type,
+        snowflake=snowflake,
+        complexity=complexity,
+        is_immutable=is_immutable,
+        is_mnpi=is_mnpi,
+        is_pci=is_pci,
+        is_pii=is_pii,
+        is_client=is_client,
+        is_public=is_public,
+        is_internal=is_internal,
+        is_confidential=is_confidential,
+        is_highly_confidential=is_highly_confidential,
+        is_active=is_active,
+        owners=owners,
+        application_id=application_id,
+        producer_application_id=producer_application_id,
+        consumer_application_id=consumer_application_id,
+        flow_details=flow_details,
+        **kwargs,
+    )
+    dataflow_obj.client = self
+    return dataflow_obj
+
+
+
+ +
+ +
+ + +

+ product(identifier, title='', category=None, short_abstract='', description='', is_active=True, is_restricted=None, maintainer=None, region='Global', publisher='J.P. Morgan', sub_category=None, tag=None, delivery_channel='API', theme=None, release_date=None, language='English', status='Available', image='', logo='', dataset=None, **kwargs) + +

+ + +
+ +

Instantiate a Product object with this client for metadata creation.

+ + +

Parameters:

+ + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + +
NameTypeDescriptionDefault
identifier + str + +
+

Product identifier.

+
+
+ required +
title + str + +
+

Product title. If not provided, defaults to identifier.

+
+
+ '' +
category + str | list[str] | None + +
+

Category. Defaults to None.

+
+
+ None +
short_abstract + str + +
+

Short description. Defaults to "".

+
+
+ '' +
description + str + +
+

Description. If not provided, defaults to identifier.

+
+
+ '' +
is_active + bool + +
+

Boolean for Active status. Defaults to True.

+
+
+ True +
is_restricted + bool | None + +
+

Flag for restricted products. Defaults to None.

+
+
+ None +
maintainer + str | list[str] | None + +
+

Product maintainer. Defaults to None.

+
+
+ None +
region + str | list[str] | None + +
+

Product region. Defaults to None.

+
+
+ 'Global' +
publisher + str | None + +
+

Name of vendor that publishes the data. Defaults to None.

+
+
+ 'J.P. Morgan' +
sub_category + str | list[str] | None + +
+

Product sub-category. Defaults to None.

+
+
+ None +
tag + str | list[str] | None + +
+

Tags used for search purposes. Defaults to None.

+
+
+ None +
delivery_channel + str | list[str] + +
+

Product delivery channel. Defaults to "API".

+
+
+ 'API' +
theme + str | None + +
+

Product theme. Defaults to None.

+
+
+ None +
release_date + str | None + +
+

Product release date. Defaults to None.

+
+
+ None +
language + str + +
+

Product language. Defaults to "English".

+
+
+ 'English' +
status + str + +
+

Product status. Defaults to "Available".

+
+
+ 'Available' +
image + str + +
+

Product image. Defaults to "".

+
+
+ '' +
logo + str + +
+

Product logo. Defaults to "".

+
+
+ '' +
dataset + str | list[str] | None + +
+

Product datasets. Defaults to None.

+
+
+ None +
+ + +

Returns:

+ + + + + + + + + + + + + +
Name TypeDescription
Product + Product + +
+

Fusion Product class instance.

+
+
+ + +

Examples:

+
>>> fusion = Fusion()
+>>> fusion.product(identifier="PRODUCT_1", title="Product")
+
+ + +
+ Note +

See the product module for more information on functionalities of product objects.

+
+
+ Source code in py_src/fusion/fusion.py +
1545
+1546
+1547
+1548
+1549
+1550
+1551
+1552
+1553
+1554
+1555
+1556
+1557
+1558
+1559
+1560
+1561
+1562
+1563
+1564
+1565
+1566
+1567
+1568
+1569
+1570
+1571
+1572
+1573
+1574
+1575
+1576
+1577
+1578
+1579
+1580
+1581
+1582
+1583
+1584
+1585
+1586
+1587
+1588
+1589
+1590
+1591
+1592
+1593
+1594
+1595
+1596
+1597
+1598
+1599
+1600
+1601
+1602
+1603
+1604
+1605
+1606
+1607
+1608
+1609
+1610
+1611
+1612
+1613
+1614
+1615
+1616
+1617
+1618
+1619
+1620
+1621
+1622
+1623
+1624
+1625
+1626
+1627
+1628
def product(  # noqa: PLR0913
+    self,
+    identifier: str,
+    title: str = "",
+    category: str | list[str] | None = None,
+    short_abstract: str = "",
+    description: str = "",
+    is_active: bool = True,
+    is_restricted: bool | None = None,
+    maintainer: str | list[str] | None = None,
+    region: str | list[str] = "Global",
+    publisher: str = "J.P. Morgan",
+    sub_category: str | list[str] | None = None,
+    tag: str | list[str] | None = None,
+    delivery_channel: str | list[str] = "API",
+    theme: str | None = None,
+    release_date: str | None = None,
+    language: str = "English",
+    status: str = "Available",
+    image: str = "",
+    logo: str = "",
+    dataset: str | list[str] | None = None,
+    **kwargs: Any,
+) -> Product:
+    """Instantiate a Product object with this client for metadata creation.
+
+    Args:
+        identifier (str): Product identifier.
+        title (str, optional): Product title. If not provided, defaults to identifier.
+        category (str | list[str] | None, optional): Category. Defaults to None.
+        short_abstract (str, optional): Short description. Defaults to "".
+        description (str, optional): Description. If not provided, defaults to identifier.
+        is_active (bool, optional): Boolean for Active status. Defaults to True.
+        is_restricted (bool | None, optional): Flag for restricted products. Defaults to None.
+        maintainer (str | list[str] | None, optional): Product maintainer. Defaults to None.
+        region (str | list[str] | None, optional): Product region. Defaults to None.
+        publisher (str | None, optional): Name of vendor that publishes the data. Defaults to None.
+        sub_category (str | list[str] | None, optional): Product sub-category. Defaults to None.
+        tag (str | list[str] | None, optional): Tags used for search purposes. Defaults to None.
+        delivery_channel (str | list[str], optional): Product delivery channel. Defaults to "API".
+        theme (str | None, optional): Product theme. Defaults to None.
+        release_date (str | None, optional): Product release date. Defaults to None.
+        language (str, optional): Product language. Defaults to "English".
+        status (str, optional): Product status. Defaults to "Available".
+        image (str, optional): Product image. Defaults to "".
+        logo (str, optional): Product logo. Defaults to "".
+        dataset (str | list[str] | None, optional): Product datasets. Defaults to None.
+
+    Returns:
+        Product: Fusion Product class instance.
+
+    Examples:
+        >>> fusion = Fusion()
+        >>> fusion.product(identifier="PRODUCT_1", title="Product")
+
+    Note:
+        See the product module for more information on functionalities of product objects.
+
+    """
+    product_obj = Product(
+        identifier=identifier,
+        title=title,
+        category=category,
+        short_abstract=short_abstract,
+        description=description,
+        is_active=is_active,
+        is_restricted=is_restricted,
+        maintainer=maintainer,
+        region=region,
+        publisher=publisher,
+        sub_category=sub_category,
+        tag=tag,
+        delivery_channel=delivery_channel,
+        theme=theme,
+        release_date=release_date,
+        language=language,
+        status=status,
+        image=image,
+        logo=logo,
+        dataset=dataset,
+        **kwargs,
+    )
+    product_obj.client = self
+    return product_obj
+
+
+
+ +
+ +
+ + +

+ report(identifier, title='', category=None, description='', frequency='Once', is_internal_only_dataset=False, is_third_party_data=True, is_restricted=None, is_raw_data=True, maintainer='J.P. Morgan Fusion', source=None, region=None, publisher='J.P. Morgan', product=None, sub_category=None, tags=None, created_date=None, modified_date=None, delivery_channel='API', language='English', status='Available', type_='Report', container_type='Snapshot-Full', snowflake=None, complexity=None, is_immutable=None, is_mnpi=None, is_pci=None, is_pii=None, is_client=None, is_public=None, is_internal=None, is_confidential=None, is_highly_confidential=None, is_active=None, owners=None, application_id=None, report=None, **kwargs) + +

+ + +
+ +

Instantiate Report object with this client for metadata creation for managing regulatory reporting metadata.

+ + +

Parameters:

+ + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + +
NameTypeDescriptionDefault
identifier + str + +
+

Dataset identifier.

+
+
+ required +
title + str + +
+

Dataset title. If not provided, defaults to identifier.

+
+
+ '' +
category + str | list[str] | None + +
+

A category or list of categories for the dataset.

+
+
+ None +
description + str + +
+

Dataset description. If not provided, defaults to identifier.

+
+
+ '' +
frequency + str + +
+

The frequency of the dataset. Defaults to "Once".

+
+
+ 'Once' +
is_internal_only_dataset + bool + +
+

Flag for internal datasets. Defaults to False.

+
+
+ False +
is_third_party_data + bool + +
+

Flag for third party data. Defaults to True.

+
+
+ True +
is_restricted + bool | None + +
+

Flag for restricted datasets. Defaults to None.

+
+
+ None +
is_raw_data + bool + +
+

Flag for raw datasets. Defaults to True.

+
+
+ True +
maintainer + str | None + +
+

Dataset maintainer. Defaults to "J.P. Morgan Fusion".

+
+
+ 'J.P. Morgan Fusion' +
source + str | list[str] | None + +
+

Name of data vendor which provided the data. Defaults to None.

+
+
+ None +
region + str | list[str] | None + +
+

Region. Defaults to None.

+
+
+ None +
publisher + str + +
+

Name of vendor that publishes the data. Defaults to "J.P. Morgan".

+
+
+ 'J.P. Morgan' +
product + str | list[str] | None + +
+

Product to associate dataset with. Defaults to None.

+
+
+ None +
sub_category + str | list[str] | None + +
+

Sub-category. Defaults to None.

+
+
+ None +
tags + str | list[str] | None + +
+

Tags used for search purposes. Defaults to None.

+
+
+ None +
created_date + str | None + +
+

Created date. Defaults to None.

+
+
+ None +
modified_date + str | None + +
+

Modified date. Defaults to None.

+
+
+ None +
delivery_channel + str | list[str] + +
+

Delivery channel. Defaults to "API".

+
+
+ 'API' +
language + str + +
+

Language. Defaults to "English".

+
+
+ 'English' +
status + str + +
+

Status. Defaults to "Available".

+
+
+ 'Available' +
type_ + str | None + +
+

Dataset type. Defaults to "Source".

+
+
+ 'Report' +
container_type + str | None + +
+

Container type. Defaults to "Snapshot-Full".

+
+
+ 'Snapshot-Full' +
snowflake + str | None + +
+

Snowflake account connection. Defaults to None.

+
+
+ None +
complexity + str | None + +
+

Complexity. Defaults to None.

+
+
+ None +
is_immutable + bool | None + +
+

Flag for immutable datasets. Defaults to None.

+
+
+ None +
is_mnpi + bool | None + +
+

is_mnpi. Defaults to None.

+
+
+ None +
is_pci + bool | None + +
+

is_pci. Defaults to None.

+
+
+ None +
is_pii + bool | None + +
+

is_pii. Defaults to None.

+
+
+ None +
is_client + bool | None + +
+

is_client. Defaults to None.

+
+
+ None +
is_public + bool | None + +
+

is_public. Defaults to None.

+
+
+ None +
is_internal + bool | None + +
+

is_internal. Defaults to None.

+
+
+ None +
is_confidential + bool | None + +
+

is_confidential. Defaults to None.

+
+
+ None +
is_highly_confidential + bool | None + +
+

is_highly_confidential. Defaults to None.

+
+
+ None +
is_active + bool | None + +
+

is_active. Defaults to None.

+
+
+ None +
owners + list[str] | None + +
+

The owners of the dataset. Defaults to None.

+
+
+ None +
application_id + str | None + +
+

The application ID of the dataset. Defaults to None.

+
+
+ None +
report + dict[str, str] | None + +
+

The report metadata. Specifies the tier of the report. +Required for registered reports to the catalog.

+
+
+ None +
+ + +

Returns:

+ + + + + + + + + + + + + +
Name TypeDescription
Dataset + Report + +
+

Fusion Dataset class.

+
+
+ + +

Examples:

+
>>> from fusion import Fusion
+>>> fusion = Fusion()
+>>> dataset = fusion.report(identifier="DATASET_1")
+
+ + +
+ Note +

See the dataset module for more information on functionalities of report objects.

+
+
+ Source code in py_src/fusion/fusion.py +
1995
+1996
+1997
+1998
+1999
+2000
+2001
+2002
+2003
+2004
+2005
+2006
+2007
+2008
+2009
+2010
+2011
+2012
+2013
+2014
+2015
+2016
+2017
+2018
+2019
+2020
+2021
+2022
+2023
+2024
+2025
+2026
+2027
+2028
+2029
+2030
+2031
+2032
+2033
+2034
+2035
+2036
+2037
+2038
+2039
+2040
+2041
+2042
+2043
+2044
+2045
+2046
+2047
+2048
+2049
+2050
+2051
+2052
+2053
+2054
+2055
+2056
+2057
+2058
+2059
+2060
+2061
+2062
+2063
+2064
+2065
+2066
+2067
+2068
+2069
+2070
+2071
+2072
+2073
+2074
+2075
+2076
+2077
+2078
+2079
+2080
+2081
+2082
+2083
+2084
+2085
+2086
+2087
+2088
+2089
+2090
+2091
+2092
+2093
+2094
+2095
+2096
+2097
+2098
+2099
+2100
+2101
+2102
+2103
+2104
+2105
+2106
+2107
+2108
+2109
+2110
+2111
+2112
+2113
+2114
+2115
+2116
+2117
+2118
+2119
+2120
+2121
+2122
+2123
+2124
+2125
+2126
+2127
+2128
+2129
+2130
+2131
+2132
+2133
+2134
+2135
def report(  # noqa: PLR0913
+    self,
+    identifier: str,
+    title: str = "",
+    category: str | list[str] | None = None,
+    description: str = "",
+    frequency: str = "Once",
+    is_internal_only_dataset: bool = False,
+    is_third_party_data: bool = True,
+    is_restricted: bool | None = None,
+    is_raw_data: bool = True,
+    maintainer: str | None = "J.P. Morgan Fusion",
+    source: str | list[str] | None = None,
+    region: str | list[str] | None = None,
+    publisher: str = "J.P. Morgan",
+    product: str | list[str] | None = None,
+    sub_category: str | list[str] | None = None,
+    tags: str | list[str] | None = None,
+    created_date: str | None = None,
+    modified_date: str | None = None,
+    delivery_channel: str | list[str] = "API",
+    language: str = "English",
+    status: str = "Available",
+    type_: str | None = "Report",
+    container_type: str | None = "Snapshot-Full",
+    snowflake: str | None = None,
+    complexity: str | None = None,
+    is_immutable: bool | None = None,
+    is_mnpi: bool | None = None,
+    is_pci: bool | None = None,
+    is_pii: bool | None = None,
+    is_client: bool | None = None,
+    is_public: bool | None = None,
+    is_internal: bool | None = None,
+    is_confidential: bool | None = None,
+    is_highly_confidential: bool | None = None,
+    is_active: bool | None = None,
+    owners: list[str] | None = None,
+    application_id: str | dict[str, str] | None = None,
+    report: dict[str, str] | None = None,
+    **kwargs: Any,
+) -> Report:
+    """Instantiate Report object with this client for metadata creation for managing regulatory reporting metadata.
+
+    Args:
+        identifier (str): Dataset identifier.
+        title (str, optional): Dataset title. If not provided, defaults to identifier.
+        category (str | list[str] | None, optional): A category or list of categories for the dataset.
+        Defaults to None.
+        description (str, optional): Dataset description. If not provided, defaults to identifier.
+        frequency (str, optional): The frequency of the dataset. Defaults to "Once".
+        is_internal_only_dataset (bool, optional): Flag for internal datasets. Defaults to False.
+        is_third_party_data (bool, optional): Flag for third party data. Defaults to True.
+        is_restricted (bool | None, optional): Flag for restricted datasets. Defaults to None.
+        is_raw_data (bool, optional): Flag for raw datasets. Defaults to True.
+        maintainer (str | None, optional): Dataset maintainer. Defaults to "J.P. Morgan Fusion".
+        source (str | list[str] | None, optional): Name of data vendor which provided the data. Defaults to None.
+        region (str | list[str] | None, optional): Region. Defaults to None.
+        publisher (str, optional): Name of vendor that publishes the data. Defaults to "J.P. Morgan".
+        product (str | list[str] | None, optional): Product to associate dataset with. Defaults to None.
+        sub_category (str | list[str] | None, optional): Sub-category. Defaults to None.
+        tags (str | list[str] | None, optional): Tags used for search purposes. Defaults to None.
+        created_date (str | None, optional): Created date. Defaults to None.
+        modified_date (str | None, optional): Modified date. Defaults to None.
+        delivery_channel (str | list[str], optional): Delivery channel. Defaults to "API".
+        language (str, optional): Language. Defaults to "English".
+        status (str, optional): Status. Defaults to "Available".
+        type_ (str | None, optional): Dataset type. Defaults to "Source".
+        container_type (str | None, optional): Container type. Defaults to "Snapshot-Full".
+        snowflake (str | None, optional): Snowflake account connection. Defaults to None.
+        complexity (str | None, optional): Complexity. Defaults to None.
+        is_immutable (bool | None, optional): Flag for immutable datasets. Defaults to None.
+        is_mnpi (bool | None, optional): is_mnpi. Defaults to None.
+        is_pci (bool | None, optional): is_pci. Defaults to None.
+        is_pii (bool | None, optional): is_pii. Defaults to None.
+        is_client (bool | None, optional): is_client. Defaults to None.
+        is_public (bool | None, optional): is_public. Defaults to None.
+        is_internal (bool | None, optional): is_internal. Defaults to None.
+        is_confidential (bool | None, optional): is_confidential. Defaults to None.
+        is_highly_confidential (bool | None, optional): is_highly_confidential. Defaults to None.
+        is_active (bool | None, optional): is_active. Defaults to None.
+        owners (list[str] | None, optional): The owners of the dataset. Defaults to None.
+        application_id (str | None, optional): The application ID of the dataset. Defaults to None.
+        report (dict[str, str] | None, optional): The report metadata. Specifies the tier of the report.
+            Required for registered reports to the catalog.
+
+    Returns:
+        Dataset: Fusion Dataset class.
+
+    Examples:
+        >>> from fusion import Fusion
+        >>> fusion = Fusion()
+        >>> dataset = fusion.report(identifier="DATASET_1")
+
+    Note:
+        See the dataset module for more information on functionalities of report objects.
+
+    """
+    report_obj = Report(
+        identifier=identifier,
+        title=title,
+        category=category,
+        description=description,
+        frequency=frequency,
+        is_internal_only_dataset=is_internal_only_dataset,
+        is_third_party_data=is_third_party_data,
+        is_restricted=is_restricted,
+        is_raw_data=is_raw_data,
+        maintainer=maintainer,
+        source=source,
+        region=region,
+        publisher=publisher,
+        product=product,
+        sub_category=sub_category,
+        tags=tags,
+        created_date=created_date,
+        modified_date=modified_date,
+        delivery_channel=delivery_channel,
+        language=language,
+        status=status,
+        type_=type_,
+        container_type=container_type,
+        snowflake=snowflake,
+        complexity=complexity,
+        is_immutable=is_immutable,
+        is_mnpi=is_mnpi,
+        is_pci=is_pci,
+        is_pii=is_pii,
+        is_client=is_client,
+        is_public=is_public,
+        is_internal=is_internal,
+        is_confidential=is_confidential,
+        is_highly_confidential=is_highly_confidential,
+        is_active=is_active,
+        owners=owners,
+        application_id=application_id,
+        report=report,
+        **kwargs,
+    )
+    report_obj.client = self
+    return report_obj
+
+
+
+ +
+ +
+ + +

+ to_bytes(dataset, series_member, dataset_format='parquet', catalog=None) + +

+ + +
+ +

Returns an instance of dataset (the distribution) as a bytes object.

+ + +

Parameters:

+ + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + +
NameTypeDescriptionDefault
dataset + str + +
+

A dataset identifier

+
+
+ required +
series_member + (str) + +
+

A dataset series member identifier

+
+
+ required +
dataset_format + str + +
+

The file format, e.g. CSV or Parquet. Defaults to 'parquet'.

+
+
+ 'parquet' +
catalog + str + +
+

A catalog identifier. Defaults to 'common'.

+
+
+ None +
+ +
+ Source code in py_src/fusion/fusion.py +
913
+914
+915
+916
+917
+918
+919
+920
+921
+922
+923
+924
+925
+926
+927
+928
+929
+930
+931
+932
+933
+934
+935
+936
+937
+938
+939
def to_bytes(
+    self,
+    dataset: str,
+    series_member: str,
+    dataset_format: str = "parquet",
+    catalog: str | None = None,
+) -> BytesIO:
+    """Returns an instance of dataset (the distribution) as a bytes object.
+
+    Args:
+        dataset (str): A dataset identifier
+        series_member (str,): A dataset series member identifier
+        dataset_format (str, optional): The file format, e.g. CSV or Parquet. Defaults to 'parquet'.
+        catalog (str, optional): A catalog identifier. Defaults to 'common'.
+    """
+
+    catalog = self._use_catalog(catalog)
+
+    url = distribution_to_url(
+        self.root_url,
+        dataset,
+        series_member,
+        dataset_format,
+        catalog,
+    )
+
+    return Fusion._call_for_bytes_object(url, self.session)
+
+
+
+ +
+ +
+ + +

+ to_df(dataset, dt_str='latest', dataset_format='parquet', catalog=None, n_par=None, show_progress=True, columns=None, filters=None, force_download=False, download_folder=None, dataframe_type='pandas', **kwargs) + +

+ + +
+ +

Gets distributions for a specified date or date range and returns the data as a dataframe.

+ + +

Parameters:

+ + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + +
NameTypeDescriptionDefault
dataset + str + +
+

A dataset identifier

+
+
+ required +
dt_str + str + +
+

Either a single date or a range identified by a start or end date, +or both separated with a ":". Defaults to 'latest' which will return the most recent +instance of the dataset.

+
+
+ 'latest' +
dataset_format + str + +
+

The file format, e.g. CSV or Parquet. Defaults to 'parquet'.

+
+
+ 'parquet' +
catalog + str + +
+

A catalog identifier. Defaults to 'common'.

+
+
+ None +
n_par + int + +
+

Specify how many distributions to download in parallel. +Defaults to all cpus available.

+
+
+ None +
show_progress + bool + +
+

Display a progress bar during data download Defaults to True.

+
+
+ True +
columns + List + +
+

A list of columns to return from a parquet file. Defaults to None

+
+
+ None +
filters + List + +
+

List[Tuple] or List[List[Tuple]] or None (default) +Rows which do not match the filter predicate will be removed from scanned data. +Partition keys embedded in a nested directory structure will be exploited to avoid +loading files at all if they contain no matching rows. If use_legacy_dataset is True, +filters can only reference partition keys and only a hive-style directory structure +is supported. When setting use_legacy_dataset to False, also within-file level filtering +and different partitioning schemes are supported. +More on https://arrow.apache.org/docs/python/generated/pyarrow.parquet.ParquetDataset.html

+
+
+ None +
force_download + bool + +
+

If True then will always download a file even +if it is already on disk. Defaults to False.

+
+
+ False +
download_folder + str + +
+

The path, absolute or relative, where downloaded files are saved. +Defaults to download_folder as set in init

+
+
+ None +
dataframe_type + str + +
+

Type

+
+
+ 'pandas' +
+ + +
+ Source code in py_src/fusion/fusion.py +
765
+766
+767
+768
+769
+770
+771
+772
+773
+774
+775
+776
+777
+778
+779
+780
+781
+782
+783
+784
+785
+786
+787
+788
+789
+790
+791
+792
+793
+794
+795
+796
+797
+798
+799
+800
+801
+802
+803
+804
+805
+806
+807
+808
+809
+810
+811
+812
+813
+814
+815
+816
+817
+818
+819
+820
+821
+822
+823
+824
+825
+826
+827
+828
+829
+830
+831
+832
+833
+834
+835
+836
+837
+838
+839
+840
+841
+842
+843
+844
+845
+846
+847
+848
+849
+850
+851
+852
+853
+854
+855
+856
+857
+858
+859
+860
+861
+862
+863
+864
+865
+866
+867
+868
+869
+870
+871
+872
+873
+874
+875
+876
+877
+878
+879
+880
+881
+882
+883
+884
+885
+886
+887
+888
+889
+890
+891
+892
+893
+894
+895
+896
+897
+898
+899
+900
+901
+902
+903
+904
+905
+906
+907
+908
+909
+910
+911
def to_df(  # noqa: PLR0913
+    self,
+    dataset: str,
+    dt_str: str = "latest",
+    dataset_format: str = "parquet",
+    catalog: str | None = None,
+    n_par: int | None = None,
+    show_progress: bool = True,
+    columns: list[str] | None = None,
+    filters: PyArrowFilterT | None = None,
+    force_download: bool = False,
+    download_folder: str | None = None,
+    dataframe_type: str = "pandas",
+    **kwargs: Any,
+) -> pd.DataFrame:
+    """Gets distributions for a specified date or date range and returns the data as a dataframe.
+
+    Args:
+        dataset (str): A dataset identifier
+        dt_str (str, optional): Either a single date or a range identified by a start or end date,
+            or both separated with a ":". Defaults to 'latest' which will return the most recent
+            instance of the dataset.
+        dataset_format (str, optional): The file format, e.g. CSV or Parquet. Defaults to 'parquet'.
+        catalog (str, optional): A catalog identifier. Defaults to 'common'.
+        n_par (int, optional): Specify how many distributions to download in parallel.
+            Defaults to all cpus available.
+        show_progress (bool, optional): Display a progress bar during data download Defaults to True.
+        columns (List, optional): A list of columns to return from a parquet file. Defaults to None
+        filters (List, optional): List[Tuple] or List[List[Tuple]] or None (default)
+            Rows which do not match the filter predicate will be removed from scanned data.
+            Partition keys embedded in a nested directory structure will be exploited to avoid
+            loading files at all if they contain no matching rows. If use_legacy_dataset is True,
+            filters can only reference partition keys and only a hive-style directory structure
+            is supported. When setting use_legacy_dataset to False, also within-file level filtering
+            and different partitioning schemes are supported.
+            More on https://arrow.apache.org/docs/python/generated/pyarrow.parquet.ParquetDataset.html
+        force_download (bool, optional): If True then will always download a file even
+            if it is already on disk. Defaults to False.
+        download_folder (str, optional): The path, absolute or relative, where downloaded files are saved.
+            Defaults to download_folder as set in __init__
+        dataframe_type (str, optional): Type
+    Returns:
+        class:`pandas.DataFrame`: a dataframe containing the requested data.
+            If multiple dataset instances are retrieved then these are concatenated first.
+    """
+    catalog = self._use_catalog(catalog)
+
+    # sample data is limited to csv
+    if dt_str == "sample":
+        dataset_format = "csv"
+
+    if not download_folder:
+        download_folder = self.download_folder
+    download_res = self.download(
+        dataset,
+        dt_str,
+        dataset_format,
+        catalog,
+        n_par,
+        show_progress,
+        force_download,
+        download_folder,
+        return_paths=True,
+    )
+
+    if not download_res:
+        raise ValueError("Must specify 'return_paths=True' in download call to use this function")
+
+    if not all(res[0] for res in download_res):
+        failed_res = [res for res in download_res if not res[0]]
+        raise Exception(
+            f"Not all downloads were successfully completed. "
+            f"Re-run to collect missing files. The following failed:\n{failed_res}"
+        )
+
+    files = [res[1] for res in download_res]
+
+    pd_read_fn_map = {
+        "csv": read_csv,
+        "parquet": read_parquet,
+        "parq": read_parquet,
+        "json": read_json,
+        "raw": read_csv,
+    }
+
+    pd_read_default_kwargs: dict[str, dict[str, object]] = {
+        "csv": {
+            "columns": columns,
+            "filters": filters,
+            "fs": self.fs,
+            "dataframe_type": dataframe_type,
+        },
+        "parquet": {
+            "columns": columns,
+            "filters": filters,
+            "fs": self.fs,
+            "dataframe_type": dataframe_type,
+        },
+        "json": {
+            "columns": columns,
+            "filters": filters,
+            "fs": self.fs,
+            "dataframe_type": dataframe_type,
+        },
+        "raw": {
+            "columns": columns,
+            "filters": filters,
+            "fs": self.fs,
+            "dataframe_type": dataframe_type,
+        },
+    }
+
+    pd_read_default_kwargs["parq"] = pd_read_default_kwargs["parquet"]
+
+    pd_reader = pd_read_fn_map.get(dataset_format)
+    pd_read_kwargs = pd_read_default_kwargs.get(dataset_format, {})
+    if not pd_reader:
+        raise Exception(f"No pandas function to read file in format {dataset_format}")
+
+    pd_read_kwargs.update(kwargs)
+
+    if len(files) == 0:
+        raise APIResponseError(
+            f"No series members for dataset: {dataset} "
+            f"in date or date range: {dt_str} and format: {dataset_format}"
+        )
+    if dataset_format in ["parquet", "parq"]:
+        data_df = pd_reader(files, **pd_read_kwargs)  # type: ignore
+    elif dataset_format == "raw":
+        dataframes = (
+            pd.concat(
+                [pd_reader(ZipFile(f).open(p), **pd_read_kwargs) for p in ZipFile(f).namelist()],  # type: ignore
+                ignore_index=True,
+            )
+            for f in files
+        )
+        data_df = pd.concat(dataframes, ignore_index=True)
+    else:
+        dataframes = (pd_reader(f, **pd_read_kwargs) for f in files)  # type: ignore
+        if dataframe_type == "pandas":
+            data_df = pd.concat(dataframes, ignore_index=True)
+        if dataframe_type == "polars":
+            import polars as pl
+
+            data_df = pl.concat(dataframes, how="diagonal")  # type: ignore
+
+    return data_df
+
+
+
+ +
+ +
+ + +

+ to_table(dataset, dt_str='latest', dataset_format='parquet', catalog=None, n_par=None, show_progress=True, columns=None, filters=None, force_download=False, download_folder=None, **kwargs) + +

+ + +
+ +

Gets distributions for a specified date or date range and returns the data as an arrow table.

+ + +

Parameters:

+ + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + +
NameTypeDescriptionDefault
dataset + str + +
+

A dataset identifier

+
+
+ required +
dt_str + str + +
+

Either a single date or a range identified by a start or end date, +or both separated with a ":". Defaults to 'latest' which will return the most recent +instance of the dataset.

+
+
+ 'latest' +
dataset_format + str + +
+

The file format, e.g. CSV or Parquet. Defaults to 'parquet'.

+
+
+ 'parquet' +
catalog + str + +
+

A catalog identifier. Defaults to 'common'.

+
+
+ None +
n_par + int + +
+

Specify how many distributions to download in parallel. +Defaults to all cpus available.

+
+
+ None +
show_progress + bool + +
+

Display a progress bar during data download Defaults to True.

+
+
+ True +
columns + List + +
+

A list of columns to return from a parquet file. Defaults to None

+
+
+ None +
filters + List + +
+

List[Tuple] or List[List[Tuple]] or None (default) +Rows which do not match the filter predicate will be removed from scanned data. +Partition keys embedded in a nested directory structure will be exploited to avoid +loading files at all if they contain no matching rows. If use_legacy_dataset is True, +filters can only reference partition keys and only a hive-style directory structure +is supported. When setting use_legacy_dataset to False, also within-file level filtering +and different partitioning schemes are supported. +More on https://arrow.apache.org/docs/python/generated/pyarrow.parquet.ParquetDataset.html

+
+
+ None +
force_download + bool + +
+

If True then will always download a file even +if it is already on disk. Defaults to False.

+
+
+ False +
download_folder + str + +
+

The path, absolute or relative, where downloaded files are saved. +Defaults to download_folder as set in init

+
+
+ None +
+ + +
+ Source code in py_src/fusion/fusion.py +
 941
+ 942
+ 943
+ 944
+ 945
+ 946
+ 947
+ 948
+ 949
+ 950
+ 951
+ 952
+ 953
+ 954
+ 955
+ 956
+ 957
+ 958
+ 959
+ 960
+ 961
+ 962
+ 963
+ 964
+ 965
+ 966
+ 967
+ 968
+ 969
+ 970
+ 971
+ 972
+ 973
+ 974
+ 975
+ 976
+ 977
+ 978
+ 979
+ 980
+ 981
+ 982
+ 983
+ 984
+ 985
+ 986
+ 987
+ 988
+ 989
+ 990
+ 991
+ 992
+ 993
+ 994
+ 995
+ 996
+ 997
+ 998
+ 999
+1000
+1001
+1002
+1003
+1004
+1005
+1006
+1007
+1008
+1009
+1010
+1011
+1012
+1013
+1014
+1015
+1016
+1017
+1018
+1019
+1020
+1021
+1022
+1023
+1024
+1025
+1026
+1027
+1028
+1029
+1030
+1031
+1032
+1033
+1034
+1035
+1036
+1037
+1038
+1039
+1040
+1041
+1042
+1043
+1044
+1045
+1046
+1047
def to_table(  # noqa: PLR0913
+    self,
+    dataset: str,
+    dt_str: str = "latest",
+    dataset_format: str = "parquet",
+    catalog: str | None = None,
+    n_par: int | None = None,
+    show_progress: bool = True,
+    columns: list[str] | None = None,
+    filters: PyArrowFilterT | None = None,
+    force_download: bool = False,
+    download_folder: str | None = None,
+    **kwargs: Any,
+) -> pa.Table:
+    """Gets distributions for a specified date or date range and returns the data as an arrow table.
+
+    Args:
+        dataset (str): A dataset identifier
+        dt_str (str, optional): Either a single date or a range identified by a start or end date,
+            or both separated with a ":". Defaults to 'latest' which will return the most recent
+            instance of the dataset.
+        dataset_format (str, optional): The file format, e.g. CSV or Parquet. Defaults to 'parquet'.
+        catalog (str, optional): A catalog identifier. Defaults to 'common'.
+        n_par (int, optional): Specify how many distributions to download in parallel.
+            Defaults to all cpus available.
+        show_progress (bool, optional): Display a progress bar during data download Defaults to True.
+        columns (List, optional): A list of columns to return from a parquet file. Defaults to None
+        filters (List, optional): List[Tuple] or List[List[Tuple]] or None (default)
+            Rows which do not match the filter predicate will be removed from scanned data.
+            Partition keys embedded in a nested directory structure will be exploited to avoid
+            loading files at all if they contain no matching rows. If use_legacy_dataset is True,
+            filters can only reference partition keys and only a hive-style directory structure
+            is supported. When setting use_legacy_dataset to False, also within-file level filtering
+            and different partitioning schemes are supported.
+            More on https://arrow.apache.org/docs/python/generated/pyarrow.parquet.ParquetDataset.html
+        force_download (bool, optional): If True then will always download a file even
+            if it is already on disk. Defaults to False.
+        download_folder (str, optional): The path, absolute or relative, where downloaded files are saved.
+            Defaults to download_folder as set in __init__
+    Returns:
+        class:`pyarrow.Table`: a dataframe containing the requested data.
+            If multiple dataset instances are retrieved then these are concatenated first.
+    """
+    catalog = self._use_catalog(catalog)
+    n_par = cpu_count(n_par)
+    if not download_folder:
+        download_folder = self.download_folder
+    download_res = self.download(
+        dataset,
+        dt_str,
+        dataset_format,
+        catalog,
+        n_par,
+        show_progress,
+        force_download,
+        download_folder,
+        return_paths=True,
+    )
+
+    if not download_res:
+        raise ValueError("Must specify 'return_paths=True' in download call to use this function")
+
+    if not all(res[0] for res in download_res):
+        failed_res = [res for res in download_res if not res[0]]
+        raise RuntimeError(
+            f"Not all downloads were successfully completed. "
+            f"Re-run to collect missing files. The following failed:\n{failed_res}"
+        )
+
+    files = [res[1] for res in download_res]
+
+    read_fn_map = {
+        "csv": csv_to_table,
+        "parquet": parquet_to_table,
+        "parq": parquet_to_table,
+        "json": json_to_table,
+        "raw": csv_to_table,
+    }
+
+    read_default_kwargs: dict[str, dict[str, object]] = {
+        "csv": {"columns": columns, "filters": filters, "fs": self.fs},
+        "parquet": {"columns": columns, "filters": filters, "fs": self.fs},
+        "json": {"columns": columns, "filters": filters, "fs": self.fs},
+        "raw": {"columns": columns, "filters": filters, "fs": self.fs},
+    }
+
+    read_default_kwargs["parq"] = read_default_kwargs["parquet"]
+
+    reader = read_fn_map.get(dataset_format)
+    read_kwargs = read_default_kwargs.get(dataset_format, {})
+    if not reader:
+        raise AssertionError(f"No function to read file in format {dataset_format}")
+
+    read_kwargs.update(kwargs)
+
+    if len(files) == 0:
+        raise APIResponseError(
+            f"No series members for dataset: {dataset} "
+            f"in date or date range: {dt_str} and format: {dataset_format}"
+        )
+    if dataset_format in ["parquet", "parq"]:
+        tbl = reader(files, **read_kwargs)  # type: ignore
+    else:
+        tbl = (reader(f, **read_kwargs) for f in files)  # type: ignore
+        tbl = pa.concat_tables(tbl)
+
+    return tbl
+
+
+
+ +
+ +
+ + +

+ upload(path, dataset=None, dt_str='latest', catalog=None, n_par=None, show_progress=True, return_paths=False, multipart=True, chunk_size=5 * 2 ** 20, from_date=None, to_date=None, preserve_original_name=False, additional_headers=None) + +

+ + +
+ +

Uploads the requested files/files to Fusion.

+ + +

Parameters:

+ + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + +
NameTypeDescriptionDefault
path + str + +
+

path to a file or a folder with files

+
+
+ required +
dataset + str + +
+

Dataset identifier to which the file will be uploaded (for single file only). + If not provided the dataset will be implied from file's name.

+
+
+ None +
dt_str + str + +
+

A file name. Can be any string but is usually a date. + Defaults to 'latest' which will return the most recent. + Relevant for a single file upload only. If not provided the dataset will + be implied from file's name.

+
+
+ 'latest' +
catalog + str + +
+

A catalog identifier. Defaults to 'common'.

+
+
+ None +
n_par + int + +
+

Specify how many distributions to download in parallel. +Defaults to all cpus available.

+
+
+ None +
show_progress + bool + +
+

Display a progress bar during data download Defaults to True.

+
+
+ True +
return_paths + bool + +
+

Return paths and success statuses of the downloaded files.

+
+
+ False +
multipart + bool + +
+

Is multipart upload.

+
+
+ True +
chunk_size + int + +
+

Maximum chunk size.

+
+
+ 5 * 2 ** 20 +
from_date + str + +
+

start of the data date range contained in the distribution, +defaults to upoad date

+
+
+ None +
to_date + str + +
+

end of the data date range contained in the distribution, +defaults to upload date.

+
+
+ None +
preserve_original_name + bool + +
+

Preserve the original name of the file. Defaults to False.

+
+
+ False +
+ + +
+ Source code in py_src/fusion/fusion.py +
1049
+1050
+1051
+1052
+1053
+1054
+1055
+1056
+1057
+1058
+1059
+1060
+1061
+1062
+1063
+1064
+1065
+1066
+1067
+1068
+1069
+1070
+1071
+1072
+1073
+1074
+1075
+1076
+1077
+1078
+1079
+1080
+1081
+1082
+1083
+1084
+1085
+1086
+1087
+1088
+1089
+1090
+1091
+1092
+1093
+1094
+1095
+1096
+1097
+1098
+1099
+1100
+1101
+1102
+1103
+1104
+1105
+1106
+1107
+1108
+1109
+1110
+1111
+1112
+1113
+1114
+1115
+1116
+1117
+1118
+1119
+1120
+1121
+1122
+1123
+1124
+1125
+1126
+1127
+1128
+1129
+1130
+1131
+1132
+1133
+1134
+1135
+1136
+1137
+1138
+1139
+1140
+1141
+1142
+1143
+1144
+1145
+1146
+1147
+1148
+1149
+1150
+1151
+1152
+1153
+1154
+1155
+1156
+1157
+1158
+1159
+1160
+1161
+1162
+1163
+1164
+1165
+1166
def upload(  # noqa: PLR0913
+    self,
+    path: str,
+    dataset: str | None = None,
+    dt_str: str = "latest",
+    catalog: str | None = None,
+    n_par: int | None = None,
+    show_progress: bool = True,
+    return_paths: bool = False,
+    multipart: bool = True,
+    chunk_size: int = 5 * 2**20,
+    from_date: str | None = None,
+    to_date: str | None = None,
+    preserve_original_name: bool | None = False,
+    additional_headers: dict[str, str] | None = None,
+) -> list[tuple[bool, str, str | None]] | None:
+    """Uploads the requested file/files to Fusion.
+
+    Args:
+        path (str): path to a file or a folder with files
+        dataset (str, optional): Dataset identifier to which the file will be uploaded (for single file only).
+                                If not provided the dataset will be implied from file's name.
+        dt_str (str, optional): A file name. Can be any string but is usually a date.
+                                Defaults to 'latest' which will return the most recent.
+                                Relevant for a single file upload only. If not provided the dataset will
+                                be implied from file's name.
+        catalog (str, optional): A catalog identifier. Defaults to 'common'.
+        n_par (int, optional): Specify how many distributions to download in parallel.
+            Defaults to all cpus available.
+        show_progress (bool, optional): Display a progress bar during data download. Defaults to True.
+        return_paths (bool, optional): Return paths and success statuses of the downloaded files.
+        multipart (bool, optional): Is multipart upload.
+        chunk_size (int, optional): Maximum chunk size.
+        from_date (str, optional): start of the data date range contained in the distribution,
+            defaults to upload date.
+        to_date (str, optional): end of the data date range contained in the distribution,
+            defaults to upload date.
+        preserve_original_name (bool, optional): Preserve the original name of the file. Defaults to False.
+        additional_headers (dict[str, str], optional): Extra HTTP headers sent with the upload requests.
+    Returns:
+        list[tuple[bool, str, str | None]] | None: when return_paths is True, a list of
+            (success, file path, error message) tuples; otherwise None.
+    """
+    catalog = self._use_catalog(catalog)
+
+    if not self.fs.exists(path):
+        raise RuntimeError("The provided path does not exist")
+
+    fs_fusion = self.get_fusion_filesystem()
+    if self.fs.info(path)["type"] == "directory":
+        file_path_lst = self.fs.find(path)
+        local_file_validation = validate_file_names(file_path_lst, fs_fusion)
+        file_path_lst = [f for flag, f in zip(local_file_validation, file_path_lst) if flag]
+        file_name = [f.split("/")[-1] for f in file_path_lst]
+        is_raw_lst = is_dataset_raw(file_path_lst, fs_fusion)
+        local_url_eqiv = [path_to_url(i, r) for i, r in zip(file_path_lst, is_raw_lst)]
+    else:
+        file_path_lst = [path]
+        if not catalog or not dataset:
+            local_file_validation = validate_file_names(file_path_lst, fs_fusion)
+            file_path_lst = [f for flag, f in zip(local_file_validation, file_path_lst) if flag]
+            is_raw_lst = is_dataset_raw(file_path_lst, fs_fusion)
+            local_url_eqiv = [path_to_url(i, r) for i, r in zip(file_path_lst, is_raw_lst)]
+            if preserve_original_name:
+                raise ValueError("preserve_original_name can only be used when catalog and dataset are provided.")
+        else:
+            date_identifier = re.compile(r"^(\d{4})(\d{2})(\d{2})$")
+            if date_identifier.match(dt_str):
+                dt_str = dt_str if dt_str != "latest" else pd.Timestamp("today").date().strftime("%Y%m%d")
+                dt_str = pd.Timestamp(dt_str).date().strftime("%Y%m%d")
+            # NOTE(review): warning text below says "does not exit" — likely intended "does not exist"
+            if catalog not in fs_fusion.ls("") or dataset not in [
+                i.split("/")[-1] for i in fs_fusion.ls(f"{catalog}/datasets")
+            ]:
+                msg = (
+                    f"File file has not been uploaded, one of the catalog: {catalog} "
+                    f"or dataset: {dataset} does not exit."
+                )
+                warnings.warn(msg, stacklevel=2)
+                return [(False, path, msg)]
+            file_format = path.split(".")[-1]
+            file_name = [path.split("/")[-1]]
+            file_format = "raw" if file_format not in RECOGNIZED_FORMATS else file_format
+
+            local_url_eqiv = [
+                "/".join(distribution_to_url("", dataset, dt_str, file_format, catalog, False).split("/")[1:])
+            ]
+    # Map each local file path to its Fusion distribution URL (plus original name when preserved)
+    if not preserve_original_name:
+        data_map_df = pd.DataFrame([file_path_lst, local_url_eqiv]).T
+        data_map_df.columns = pd.Index(["path", "url"])
+    else:
+        data_map_df = pd.DataFrame([file_path_lst, local_url_eqiv, file_name]).T
+        data_map_df.columns = pd.Index(["path", "url", "file_name"])
+
+    n_par = cpu_count(n_par)
+    parallel = len(data_map_df) > 1
+    res = upload_files(
+        fs_fusion,
+        self.fs,
+        data_map_df,
+        parallel=parallel,
+        n_par=n_par,
+        multipart=multipart,
+        chunk_size=chunk_size,
+        show_progress=show_progress,
+        from_date=from_date,
+        to_date=to_date,
+        additional_headers=additional_headers,
+    )
+    # Surface partial failures as warnings rather than raising
+    if not all(r[0] for r in res):
+        failed_res = [r for r in res if not r[0]]
+        msg = f"Not all uploads were successfully completed. The following failed:\n{failed_res}"
+        logger.warning(msg)
+        warnings.warn(msg, stacklevel=2)
+
+    return res if return_paths else None
+
+
+
+ +
+ + + +
+ +
+ +
+ + + + +
+ +
+ +
+ +
+ + + + +
+ +

Synchronisation between the local filesystem and Fusion.

+ + +

Parameters:

+ + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + +
NameTypeDescriptionDefault
fs_fusion + filesystem + +
+

Fusion filesystem.

+
+
+ required +
fs_local + filesystem + +
+

Local filesystem.

+
+
+ required +
products + list + +
+

List of products.

+
+
+ None +
datasets + list + +
+

List of datasets.

+
+
+ None +
catalog + str + +
+

Fusion catalog.

+
+
+ None +
direction + str + +
+

Direction of synchronisation: upload/download.

+
+
+ 'upload' +
flatten + bool + +
+

Flatten the folder structure.

+
+
+ False +
dataset_format + str + +
+

Dataset format for upload/download.

+
+
+ None +
n_par + int + +
+

Specify how many distributions to download in parallel. Defaults to all.

+
+
+ None +
show_progress + bool + +
+

Display a progress bar during data download. Defaults to True.

+
+
+ True +
local_path + str + +
+

path to files in the local filesystem, e.g., "s3a://my_bucket/"

+
+
+ '' +
log_level + int + +
+

Logging level. Error level by default.

+
+
+ ERROR +
log_path + str + +
+

The folder path where the log is stored. Defaults to ".".

+
+
+ '.' +
+ + +
+ Source code in py_src/fusion/fs_sync.py +
244
+245
+246
+247
+248
+249
+250
+251
+252
+253
+254
+255
+256
+257
+258
+259
+260
+261
+262
+263
+264
+265
+266
+267
+268
+269
+270
+271
+272
+273
+274
+275
+276
+277
+278
+279
+280
+281
+282
+283
+284
+285
+286
+287
+288
+289
+290
+291
+292
+293
+294
+295
+296
+297
+298
+299
+300
+301
+302
+303
+304
+305
+306
+307
+308
+309
+310
+311
+312
+313
+314
+315
+316
+317
+318
+319
+320
+321
+322
+323
+324
+325
+326
+327
+328
+329
+330
+331
+332
+333
+334
+335
+336
+337
+338
+339
+340
+341
+342
+343
+344
+345
+346
+347
+348
+349
+350
+351
+352
+353
+354
+355
+356
+357
+358
+359
+360
+361
def fsync(  # noqa: PLR0913
+    fs_fusion: fsspec.filesystem,
+    fs_local: fsspec.filesystem,
+    products: Optional[list[str]] = None,
+    datasets: Optional[list[str]] = None,
+    catalog: Optional[str] = None,
+    direction: str = "upload",
+    flatten: bool = False,
+    dataset_format: Optional[str] = None,
+    n_par: Optional[int] = None,
+    show_progress: bool = True,
+    local_path: str = "",
+    log_level: int = logging.ERROR,
+    log_path: str = ".",
+) -> None:
+    """Synchronisation between the local filesystem and Fusion.
+
+    Args:
+        fs_fusion (fsspec.filesystem): Fusion filesystem.
+        fs_local (fsspec.filesystem): Local filesystem.
+        products (list): List of products.
+        datasets (list): List of datasets.
+        catalog (str): Fusion catalog.
+        direction (str): Direction of synchronisation: upload/download.
+        flatten (bool): Flatten the folder structure.
+        dataset_format (str): Dataset format for upload/download.
+        n_par (int, optional): Specify how many distributions to download in parallel. Defaults to all.
+        show_progress (bool): Display a progress bar during data download. Defaults to True.
+        local_path (str): path to files in the local filesystem, e.g., "s3a://my_bucket/"
+        log_level (int): Logging level. Error level by default.
+        log_path (str): The folder path where the log is stored. Defaults to ".".
+
+    Returns:
+        None. Loops indefinitely, re-syncing on changes, until the user exits.
+    """
+
+    if logger.hasHandlers():
+        logger.handlers.clear()
+    file_handler = logging.FileHandler(filename="{}/{}".format(log_path, "fusion_fsync.log"))
+    logging.addLevelName(VERBOSE_LVL, "VERBOSE")
+    stdout_handler = logging.StreamHandler(sys.stdout)
+    formatter = logging.Formatter(
+        "%(asctime)s.%(msecs)03d %(name)s:%(levelname)s %(message)s",
+        datefmt="%Y-%m-%d %H:%M:%S",
+    )
+    stdout_handler.setFormatter(formatter)
+    logger.addHandler(stdout_handler)
+    logger.addHandler(file_handler)
+    logger.setLevel(log_level)
+    # Fall back to the default catalog and empty product/dataset lists
+    catalog = catalog if catalog else "common"
+    datasets = datasets if datasets else []
+    products = products if products else []
+
+    assert len(products) > 0 or len(datasets) > 0, "At least one list products or datasets should be non-empty."
+    assert direction in [
+        "upload",
+        "download",
+    ], "The direction must be either upload or download."
+    # Normalise local_path so it always ends with a trailing slash
+    if len(local_path) > 0 and local_path[-1] != "/":
+        local_path += "/"
+    # Expand each requested product into its member dataset identifiers
+    for product in products:
+        res = json.loads(fs_fusion.cat(f"{catalog}/products/{product}").decode())
+        datasets += [r["identifier"] for r in res["resources"]]
+
+    assert len(datasets) > 0, "The supplied products did not contain any datasets."
+    # Poll forever: re-sync whenever the local or remote state changes
+    local_state = pd.DataFrame()
+    fusion_state = pd.DataFrame()
+    while True:
+        try:
+            local_state_temp = _get_local_state(
+                fs_local,
+                fs_fusion,
+                datasets,
+                catalog,
+                dataset_format,
+                local_state,
+                local_path,
+            )
+            fusion_state_temp = _get_fusion_df(fs_fusion, datasets, catalog, flatten, dataset_format)
+            if not local_state_temp.equals(local_state) or not fusion_state_temp.equals(fusion_state):
+                res = _synchronize(
+                    fs_fusion,
+                    fs_local,
+                    local_state_temp,
+                    fusion_state_temp,
+                    direction,
+                    n_par,
+                    show_progress,
+                    local_path,
+                )
+                if len(res) == 0 or all(i[0] for i in res):
+                    local_state = local_state_temp
+                    fusion_state = fusion_state_temp
+
+                if not all(r[0] for r in res):
+                    failed_res = [r for r in res if not r[0]]
+                    msg = f"Not all {direction}s were successfully completed. The following failed:\n{failed_res}"
+                    errs = [r for r in res if not r[2]]
+                    logger.warning(msg)
+                    logger.warning(errs)
+                    warnings.warn(msg, stacklevel=2)
+
+            else:
+                logger.info("All synced, sleeping")
+                time.sleep(10)
+
+        except KeyboardInterrupt:  # noqa: PERF203
+            if input("Type exit to exit: ") != "exit":
+                continue
+            break
+
+        except Exception as _:
+            logger.error("Exception thrown", exc_info=True)
+            continue
+
+
+
+ +
+ +
+ + + + +
+ +

Fusion Product class and functions.

+ + + +
+ + + + + + + + +
+ + + +

+ Product + + + + dataclass + + +

+ + +
+ + +

Fusion Product class for managing product metadata in a Fusion catalog.

+ + +

Attributes:

+ + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + +
NameTypeDescription
identifier + str + +
+

A unique identifier for the product.

+
+
title + str + +
+

Product title. Defaults to "".

+
+
category + str | list[str] | None + +
+

Product category. Defaults to None.

+
+
short_abstract + str + +
+

Short abstract of the product. Defaults to "".

+
+
description + str + +
+

Product description. If not provided, defaults to identifier.

+
+
is_active + bool + +
+

Boolean for Active status. Defaults to True.

+
+
is_restricted + bool | None + +
+

Flag for restricted products. Defaults to None.

+
+
maintainer + str | list[str] | None + +
+

Product maintainer. Defaults to None.

+
+
region + str | list[str] | None + +
+

Product region. Defaults to None.

+
+
publisher + str | None + +
+

Name of vendor that publishes the data. Defaults to None.

+
+
sub_category + str | list[str] | None + +
+

Product sub-category. Defaults to None.

+
+
tag + str | list[str] | None + +
+

Tags used for search purposes. Defaults to None.

+
+
delivery_channel + str | list[str] + +
+

Product delivery channel. Defaults to ["API"].

+
+
theme + str | None + +
+

Product theme. Defaults to None.

+
+
release_date + str | None + +
+

Product release date. Defaults to None.

+
+
language + str + +
+

Product language. Defaults to "English".

+
+
status + str + +
+

Product status. Defaults to "Available".

+
+
image + str + +
+

Product image. Defaults to "".

+
+
logo + str + +
+

Product logo. Defaults to "".

+
+
dataset + str | list[str] | None + +
+

Product datasets. Defaults to None.

+
+
_client + Any + +
+

Fusion client object. Defaults to None.

+
+
+ +
+ Source code in py_src/fusion/product.py +
 29
+ 30
+ 31
+ 32
+ 33
+ 34
+ 35
+ 36
+ 37
+ 38
+ 39
+ 40
+ 41
+ 42
+ 43
+ 44
+ 45
+ 46
+ 47
+ 48
+ 49
+ 50
+ 51
+ 52
+ 53
+ 54
+ 55
+ 56
+ 57
+ 58
+ 59
+ 60
+ 61
+ 62
+ 63
+ 64
+ 65
+ 66
+ 67
+ 68
+ 69
+ 70
+ 71
+ 72
+ 73
+ 74
+ 75
+ 76
+ 77
+ 78
+ 79
+ 80
+ 81
+ 82
+ 83
+ 84
+ 85
+ 86
+ 87
+ 88
+ 89
+ 90
+ 91
+ 92
+ 93
+ 94
+ 95
+ 96
+ 97
+ 98
+ 99
+100
+101
+102
+103
+104
+105
+106
+107
+108
+109
+110
+111
+112
+113
+114
+115
+116
+117
+118
+119
+120
+121
+122
+123
+124
+125
+126
+127
+128
+129
+130
+131
+132
+133
+134
+135
+136
+137
+138
+139
+140
+141
+142
+143
+144
+145
+146
+147
+148
+149
+150
+151
+152
+153
+154
+155
+156
+157
+158
+159
+160
+161
+162
+163
+164
+165
+166
+167
+168
+169
+170
+171
+172
+173
+174
+175
+176
+177
+178
+179
+180
+181
+182
+183
+184
+185
+186
+187
+188
+189
+190
+191
+192
+193
+194
+195
+196
+197
+198
+199
+200
+201
+202
+203
+204
+205
+206
+207
+208
+209
+210
+211
+212
+213
+214
+215
+216
+217
+218
+219
+220
+221
+222
+223
+224
+225
+226
+227
+228
+229
+230
+231
+232
+233
+234
+235
+236
+237
+238
+239
+240
+241
+242
+243
+244
+245
+246
+247
+248
+249
+250
+251
+252
+253
+254
+255
+256
+257
+258
+259
+260
+261
+262
+263
+264
+265
+266
+267
+268
+269
+270
+271
+272
+273
+274
+275
+276
+277
+278
+279
+280
+281
+282
+283
+284
+285
+286
+287
+288
+289
+290
+291
+292
+293
+294
+295
+296
+297
+298
+299
+300
+301
+302
+303
+304
+305
+306
+307
+308
+309
+310
+311
+312
+313
+314
+315
+316
+317
+318
+319
+320
+321
+322
+323
+324
+325
+326
+327
+328
+329
+330
+331
+332
+333
+334
+335
+336
+337
+338
+339
+340
+341
+342
+343
+344
+345
+346
+347
+348
+349
+350
+351
+352
+353
+354
+355
+356
+357
+358
+359
+360
+361
+362
+363
+364
+365
+366
+367
+368
+369
+370
+371
+372
+373
+374
+375
+376
+377
+378
+379
+380
+381
+382
+383
+384
+385
+386
+387
+388
+389
+390
+391
+392
+393
+394
+395
+396
+397
+398
+399
+400
+401
+402
+403
+404
+405
+406
+407
+408
+409
+410
+411
+412
+413
+414
+415
+416
+417
+418
+419
+420
+421
+422
+423
+424
+425
+426
+427
+428
+429
+430
+431
+432
+433
+434
+435
+436
+437
+438
+439
+440
+441
+442
+443
+444
+445
+446
+447
+448
+449
+450
+451
+452
+453
+454
+455
+456
+457
+458
+459
+460
+461
+462
+463
+464
+465
+466
+467
+468
+469
+470
+471
+472
+473
+474
+475
+476
+477
+478
+479
+480
+481
+482
+483
+484
+485
+486
+487
+488
+489
+490
+491
+492
+493
+494
+495
+496
+497
+498
+499
+500
+501
+502
+503
+504
+505
+506
+507
+508
+509
+510
+511
+512
+513
+514
+515
+516
+517
+518
+519
+520
+521
+522
+523
+524
+525
+526
+527
+528
+529
+530
+531
+532
+533
+534
+535
+536
+537
+538
+539
+540
+541
+542
+543
+544
+545
+546
+547
+548
+549
+550
+551
+552
+553
+554
+555
+556
+557
+558
+559
+560
+561
+562
+563
+564
+565
+566
+567
+568
+569
+570
+571
+572
+573
+574
+575
+576
+577
+578
+579
+580
+581
+582
+583
+584
+585
+586
+587
+588
+589
+590
+591
+592
+593
+594
+595
+596
+597
+598
+599
+600
+601
+602
+603
+604
+605
+606
+607
+608
+609
+610
+611
+612
+613
+614
+615
+616
+617
+618
+619
+620
@dataclass
+class Product(metaclass=CamelCaseMeta):
+    """Fusion Product class for managing product metadata in a Fusion catalog.
+
+    Attributes:
+        identifier (str): A unique identifier for the product.
+        title (str, optional): Product title. Defaults to "".
+        category (str | list[str] | None, optional): Product category. Defaults to None.
+        short_abstract (str, optional): Short abstract of the product. Defaults to "".
+        description (str, optional): Product description. If not provided, defaults to identifier.
+        is_active (bool, optional): Boolean for Active status. Defaults to True.
+        is_restricted (bool | None, optional): Flag for restricted products. Defaults to None.
+        maintainer (str | list[str] | None, optional): Product maintainer. Defaults to None.
+        region (str | list[str] | None, optional): Product region. Defaults to None.
+        publisher (str | None, optional): Name of vendor that publishes the data. Defaults to None.
+        sub_category (str | list[str] | None, optional): Product sub-category. Defaults to None.
+        tag (str | list[str] | None, optional): Tags used for search purposes. Defaults to None.
+        delivery_channel (str | list[str], optional): Product delivery channel. Defaults to ["API"].
+        theme (str | None, optional): Product theme. Defaults to None.
+        release_date (str | None, optional): Product release date. Defaults to None.
+        language (str, optional): Product language. Defaults to "English".
+        status (str, optional): Product status. Defaults to "Available".
+        image (str, optional): Product image. Defaults to "".
+        logo (str, optional): Product logo. Defaults to "".
+        dataset (str | list[str] | None, optional): Product datasets. Defaults to None.
+        _client (Any, optional): Fusion client object. Defaults to None.
+
+    """
+
+    identifier: str
+    title: str = ""
+    category: str | list[str] | None = None
+    short_abstract: str = ""
+    description: str = ""
+    is_active: bool = True
+    is_restricted: bool | None = None
+    maintainer: str | list[str] | None = None
+    region: str | list[str]  = field(default_factory=lambda: ["Global"])
+    publisher: str = "J.P. Morgan"
+    sub_category: str | list[str] | None = None
+    tag: str | list[str] | None = None
+    delivery_channel: str | list[str] = field(default_factory=lambda: ["API"])
+    theme: str | None = None
+    release_date: str | None = None
+    language: str = "English"
+    status: str = "Available"
+    image: str = ""
+    logo: str = ""
+    dataset: str | list[str] | None = None
+
+    _client: Fusion | None = field(init=False, repr=False, compare=False, default=None)
+
+    def __repr__(self: Product) -> str:
+        """Return an object representation of the Product object.
+
+        Returns:
+            str: Object representaiton of the product.
+
+        """
+        attrs = {k: v for k, v in self.__dict__.items() if not k.startswith("_")}
+        return f"Product(\n" + ",\n ".join(f"{k}={v!r}" for k, v in attrs.items()) + "\n)"
+
+    def __post_init__(self: Product) -> None:
+        """Format Product metadata fields after object instantiation."""
+        self.identifier = tidy_string(self.identifier).upper().replace(" ", "_")
+        self.title = tidy_string(self.title) if self.title != "" else self.identifier.replace("_", " ").title()
+        self.description = tidy_string(self.description) if self.description != "" else self.title
+        self.short_abstract = tidy_string(self.short_abstract) if self.short_abstract != "" else self.title
+        self.description = tidy_string(self.description)
+        self.category = (
+            self.category if isinstance(self.category, list) or self.category is None else make_list(self.category)
+        )
+        self.tag = self.tag if isinstance(self.tag, list) or self.tag is None else make_list(self.tag)
+        self.dataset = (
+            self.dataset if isinstance(self.dataset, list) or self.dataset is None else make_list(self.dataset)
+        )
+        self.sub_category = (
+            self.sub_category
+            if isinstance(self.sub_category, list) or self.sub_category is None
+            else make_list(self.sub_category)
+        )
+        self.is_active = self.is_active if isinstance(self.is_active, bool) else make_bool(self.is_active)
+        self.is_restricted = (
+            self.is_restricted
+            if isinstance(self.is_restricted, bool) or self.is_restricted is None
+            else make_bool(self.is_restricted)
+        )
+        self.maintainer = (
+            self.maintainer
+            if isinstance(self.maintainer, list) or self.maintainer is None
+            else make_list(self.maintainer)
+        )
+        self.region = self.region if isinstance(self.region, list) or self.region is None else make_list(self.region)
+        self.delivery_channel = (
+            self.delivery_channel if isinstance(self.delivery_channel, list) else make_list(self.delivery_channel)
+        )
+        self.release_date = convert_date_format(self.release_date) if self.release_date else None
+
+    def __getattr__(self, name: str) -> Any:
+        # Redirect attribute access to the snake_case version
+        snake_name = camel_to_snake(name)
+        if snake_name in self.__dict__:
+            return self.__dict__[snake_name]
+        raise AttributeError(f"'{type(self).__name__}' object has no attribute '{name}'")
+
+    def __setattr__(self, name: str, value: Any) -> None:
+        if name == "client":
+            # Use the property setter for client
+            object.__setattr__(self, name, value)
+        else:
+            snake_name = camel_to_snake(name)
+            self.__dict__[snake_name] = value
+
+    @property
+    def client(self) -> Fusion | None:
+        """Return the client."""
+        return self._client
+
+    @client.setter
+    def client(self, client: Fusion | None) -> None:
+        """Set the client for the Product. Set automatically, if the Product is instantiated from a Fusion object.
+
+        Args:
+            client (Any): Fusion client object.
+
+        Examples:
+            >>> from fusion import Fusion
+            >>> fusion = Fusion()
+            >>> product = fusion.product("my_product")
+            >>> product.client = fusion
+
+        """
+        self._client = client
+
+    def _use_client(self, client: Fusion | None) -> Fusion:
+        """Determine client."""
+
+        res = self._client if client is None else client
+        if res is None:
+            raise ValueError("A Fusion client object is required.")
+        return res
+
+    @classmethod
+    def _from_series(cls: type[Product], series: pd.Series[Any]) -> Product:
+        """Instantiate a Product object from a pandas Series.
+
+        Args:
+            series (pd.Series[Any]): Product metadata as a pandas Series.
+
+        Returns:
+            Product: Product object.
+
+        """
+        series = series.rename(lambda x: x.replace(" ", "").replace("_", "").lower())
+        series = series.rename({"tag": "tags", "dataset": "datasets"})
+        short_abstract = series.get("abstract", "")
+        short_abstract = series.get("shortabstract", "") if short_abstract is None else short_abstract
+
+        return cls(
+            title=series.get("title", ""),
+            identifier=series.get("identifier", ""),
+            category=series.get("category", None),
+            short_abstract=short_abstract,
+            description=series.get("description", ""),
+            theme=series.get("theme", None),
+            release_date=series.get("releasedate", None),
+            is_active=series.get("isactive", True),
+            is_restricted=series.get("isrestricted", None),
+            maintainer=series.get("maintainer", None),
+            region=series.get("region", "Global"),
+            publisher=series.get("publisher", "J.P. Morgan"),
+            sub_category=series.get("subcategory", None),
+            tag=series.get("tags", None),
+            delivery_channel=series.get("deliverychannel", "API"),
+            language=series.get("language", "English"),
+            status=series.get("status", "Available"),
+            dataset=series.get("datasets", None),
+        )
+
+    @classmethod
+    def _from_dict(cls: type[Product], data: dict[str, Any]) -> Product:
+        """Instantiate a Product object from a dictionary.
+
+        Args:
+            data (dict[str, Any]): Product metadata as a dictionary.
+
+        Returns:
+            Product: Product object.
+
+        """
+        keys = [f.name for f in fields(cls)]
+        data = {camel_to_snake(k): v for k, v in data.items()}
+        data = {k: v for k, v in data.items() if k in keys}
+        return cls(**data)
+
+    @classmethod
+    def _from_csv(cls: type[Product], file_path: str, identifier: str | None = None) -> Product:
+        """Instantiate a Product object from a CSV file.
+
+        Args:
+            file_path (str): Path to the CSV file.
+            identifier (str | None, optional): Product identifer for filtering if multipler products are defined in csv.
+                Defaults to None.
+
+        Returns:
+            Product: Product object.
+
+        """
+        data = pd.read_csv(file_path)
+
+        return (
+            Product._from_series(data[data["identifier"] == identifier].reset_index(drop=True).iloc[0])
+            if identifier
+            else Product._from_series(data.reset_index(drop=True).iloc[0])
+        )
+
+    def from_object(
+        self,
+        product_source: Product | dict[str, Any] | str | pd.Series[Any],
+    ) -> Product:
+        """Instantiate a Product object from a Product object, dictionary, path to CSV, JSON string, or pandas Series.
+
+        Args:
+            product_source (Product | dict[str, Any] | str | pd.Series[Any]): Product metadata source.
+
+        Raises:
+            TypeError: If the object provided is not a Product, dictionary, path to CSV file, JSON string,
+            or pandas Series.
+
+        Returns:
+            Product: Product object.
+
+        Examples:
+            Instantiating a Product object from a dictionary:
+
+            >>> from fusion import Fusion
+            >>> from fusion.product import Product
+            >>> fusion = Fusion()
+            >>> product_dict = {
+            ...     "identifier": "my_product",
+            ...     "title": "My Product",
+            ...     "category": "Data",
+            ...     "short_abstract": "My product is awesome",
+            ...     "description": "My product is very awesome",
+            ...     "is_active": True,
+            ...     "is_restricted": False,
+            ...     "maintainer": "My Company",
+            ...     "region": "Global",
+            ...     "publisher": "My Company",
+            ...     "sub_category": "Data",
+            ...     "tag": "My Company",
+            ...     "delivery_channel": "API",
+            ...     "theme": "Data",
+            ...     "release_date": "2021-01-01",
+            ...     "language": "English",
+            ...     "status": "Available"
+            ... }
+            >>> product = fusion.product("my_product").from_object(product_dict)
+
+            Instantiating a Product object from a JSON string:
+
+            >>> from fusion import Fusion
+            >>> from fusion.product import Product
+            >>> fusion = Fusion()
+            >>> product_json = '{
+            ...     "identifier": "my_product",
+            ...     "title": "My Product",
+            ...     "category": "Data",
+            ...     "short_abstract": "My product is awesome",
+            ...     "description": "My product is very awesome",
+            ...     "is_active": True,
+            ...     "is_restricted": False,
+            ...     "maintainer": "My Company",
+            ...     "region": "Global",
+            ...     "publisher": "My Company",
+            ...     "sub_category": "Data",
+            ...     "tag": "My Company",
+            ...     "delivery_channel": "API",
+            ...     "theme": "Data",
+            ...     "release_date": "2021-01-01",
+            ...     "language": "English",
+            ...     "status": "Available",
+            ... }'
+            >>> product = fusion.product("my_product").from_object(product_json)
+
+            Instantiating a Product object from a CSV file:
+
+            >>> from fusion import Fusion
+            >>> from fusion.product import Product
+            >>> fusion = Fusion()
+            >>> product = fusion.product("my_product").from_object("path/to/product.csv")
+
+            Instantiating a Product object from a pandas Series:
+
+            >>> from fusion import Fusion
+            >>> from fusion.product import Product
+            >>> fusion = Fusion()
+            >>> product_series = pd.Series({
+            ...     "identifier": "my_product",
+            ...     "title": "My Product",
+            ...     "category": "Data",
+            ...     "short_abstract": "My product is awesome",
+            ...     "description": "My product is very awesome",
+            ...     "is_active": True,
+            ...     "is_restricted": False,
+            ...     "maintainer": "My Company",
+            ...     "region": "Global",
+            ...     "publisher": "My Company",
+            ...     "sub_category": "Data",
+            ...     "tag": "My Company",
+            ...     "delivery_channel": "API",
+            ...     "theme": "Data",
+            ...     "release_date": "2021-01-01",
+            ...     "language": "English",
+            ...     "status": "Available",
+            ... })
+            >>> product = fusion.product("my_product").from_object(product_series)
+
+        """
+        if isinstance(product_source, Product):
+            product = product_source
+        elif isinstance(product_source, dict):
+            product = Product._from_dict(product_source)
+        elif isinstance(product_source, str):
+            if _is_json(product_source):
+                product = Product._from_dict(js.loads(product_source))
+            else:
+                product = Product._from_csv(product_source)
+        elif isinstance(product_source, pd.Series):
+            product = Product._from_series(product_source)
+        else:
+            raise TypeError(f"Could not resolve the object provided: {product_source}")
+        product.client = self._client
+        return product
+
+    def from_catalog(self, catalog: str | None = None, client: Fusion | None = None) -> Product:
+        """Instantiate a Product object from a Fusion catalog.
+
+        Args:
+            catalog (str | None, optional): Catalog identifer. Defaults to None.
+            client (Fusion | None, optional): Fusion session. Defaults to None.
+                If instantiated from a Fusion object, then the client is set automatically.
+
+        Returns:
+            Product: Product object.
+
+        Examples:
+            >>> from fusion import Fusion
+            >>> fusion = Fusion()
+            >>> product = fusion.product("my_product").from_catalog(catalog="my_catalog")
+
+        """
+        client = self._use_client(client)
+        catalog = client._use_catalog(catalog)
+
+        resp = client.session.get(f"{client.root_url}catalogs/{catalog}/products")
+        requests_raise_for_status(resp)
+        list_products = resp.json()["resources"]
+        dict_ = [dict_ for dict_ in list_products if dict_["identifier"] == self.identifier][0]
+        product_obj = Product._from_dict(dict_)
+        product_obj.client = client
+
+        return product_obj
+
+    def to_dict(self: Product) -> dict[str, Any]:
+        """Convert the Product instance to a dictionary.
+
+        Returns:
+            dict[str, Any]: Product metadata as a dictionary.
+
+        Examples:
+            >>> from fusion import Fusion
+            >>> fusion = Fusion()
+            >>> product = fusion.product("my_product")
+            >>> product_dict = product.to_dict()
+
+        """
+        product_dict = {
+            snake_to_camel(k): v
+            for k, v in self.__dict__.items()
+            if not k.startswith("_")
+        }
+        return product_dict
+
+    def create(
+        self,
+        catalog: str | None = None,
+        client: Fusion | None = None,
+        return_resp_obj: bool = False,
+    ) -> requests.Response | None:
+        """Upload a new product to a Fusion catalog.
+
+        Args:
+            client (Fusion, optional): A Fusion client object. Defaults to the instance's _client.
+                If instantiated from a Fusion object, then the client is set automatically.
+            catalog (str, optional): A catalog identifier. Defaults to None.
+            return_resp_obj (bool, optional): If True then return the response object. Defaults to False.
+
+        Returns:
+            requests.Response | None: The response object from the API call if return_resp_obj is True, otherwise None.
+
+        Examples:
+
+            From scratch:
+
+            >>> from fusion import Fusion
+            >>> fusion = Fusion()
+            >>> product = fusion.product(
+            ...     identifer="my_product"
+            ...     title="My Product",
+            ...     category="Data",
+            ...     short_abstract="My product is awesome",
+            ...     description="My product is very awesome",
+            ...     )
+            >>> product.create(catalog="my_catalog")
+
+            From a dictionary:
+
+            >>> product_dict = {
+            ...     "identifier": "my_product",
+            ...     "title": "My Product",
+            ...     "category": "Data"
+            ...     }
+            >>> product = fusion.product("my_product").from_object(product_dict)
+            >>> product.create(catalog="my_catalog")
+
+            From a JSON string:
+
+            >>> from fusion import Fusion
+            >>> fusion = Fusion()
+            >>> product_json = '{
+            ...     "identifier": "my_product",
+            ...     "title": "My Product",
+            ...     "category": "Data"
+            ...     }'
+            >>> product = fusion.product("my_product").from_object(product_json)
+            >>> product.create(catalog="my_catalog")
+
+            From a CSV file:
+
+            >>> from fusion import Fusion
+            >>> fusion = Fusion()
+            >>> product = fusion.product("my_product").from_object("path/to/product.csv")
+            >>> product.create(catalog="my_catalog")
+
+            From a pandas Series:
+
+            >>> from fusion import Fusion
+            >>> fusion = Fusion()
+            >>> product_series = pd.Series({
+            ...     "identifier": "my_product",
+            ...     "title": "My Product",
+            ...     "category": "Data"
+            ...     })
+            >>> product = fusion.product("my_product").from_object(product_series)
+            >>> product.create(catalog="my_catalog")
+
+            From existing product in a catalog:
+
+            >>> from fusion import Fusion
+            >>> fusion = Fusion()
+            >>> product = fusion.product("my_product").from_catalog()
+            >>> product.identifier = "my_new_product"
+            >>> product.create(catalog="my_catalog")
+
+        """
+        client = self._use_client(client)
+        catalog = client._use_catalog(catalog)
+
+        release_date = self.release_date if self.release_date else pd.Timestamp("today").strftime("%Y-%m-%d")
+        delivery_channel = self.delivery_channel if self.delivery_channel else ["API"]
+
+        self.release_date = release_date
+        self.delivery_channel = delivery_channel
+
+        data = self.to_dict()
+
+        url = f"{client.root_url}catalogs/{catalog}/products/{self.identifier}"
+        resp: requests.Response = client.session.post(url, json=data)
+        requests_raise_for_status(resp)
+        return resp if return_resp_obj else None
+
+    def update(
+        self,
+        catalog: str | None = None,
+        client: Fusion | None = None,
+        return_resp_obj: bool = False,
+    ) -> requests.Response | None:
+        """Update an existing product in a Fusion catalog.
+
+        Args:
+            client (Fusion): A Fusion client object. Defaults to the instance's _client.
+                If instantiated from a Fusion object, then the client is set automatically.
+            catalog (str, optional): A catalog identifier. Defaults to None.
+            return_resp_obj (bool, optional): If True then return the response object. Defaults to False.
+
+        Returns:
+            requests.Response | None: The response object from the API call if return_resp_obj is True, otherwise None.
+
+        Examples:
+
+            >>> from fusion import Fusion
+            >>> fusion = Fusion()
+            >>> product = fusion.product("my_product").from_catalog(catalog="my_catalog")
+            >>> product.title = "My Updated Product Title"
+            >>> product.update(catalog="my_catalog")
+
+        """
+        client = self._use_client(client)
+        catalog = client._use_catalog(catalog)
+
+        release_date = self.release_date if self.release_date else pd.Timestamp("today").strftime("%Y-%m-%d")
+        delivery_channel = self.delivery_channel if self.delivery_channel else ["API"]
+
+        self.release_date = release_date
+        self.delivery_channel = delivery_channel
+
+        data = self.to_dict()
+
+        url = f"{client.root_url}catalogs/{catalog}/products/{self.identifier}"
+        resp: requests.Response = client.session.put(url, json=data)
+        requests_raise_for_status(resp)
+        return resp if return_resp_obj else None
+
+    def delete(
+        self,
+        catalog: str | None = None,
+        client: Fusion | None = None,
+        return_resp_obj: bool = False,
+    ) -> requests.Response | None:
+        """Delete a product from a Fusion catalog.
+
+        Args:
+            client (Fusion): A Fusion client object. Defaults to the instance's _client.
+                If instantiated from a Fusion object, then the client is set automatically.
+            catalog (str, optional): A catalog identifier. Defaults to None.
+            return_resp_obj (bool, optional): If True then return the response object. Defaults to False.
+
+        Returns:
+            requests.Response | None: The response object from the API call if return_resp_obj is True, otherwise None.
+
+         Examples:
+
+            >>> from fusion import Fusion
+            >>> fusion = Fusion()
+            >>> fusion.product("my_product").delete(catalog="my_catalog")
+
+        """
+        client = self._use_client(client)
+        catalog = client._use_catalog(catalog)
+
+        url = f"{client.root_url}catalogs/{catalog}/products/{self.identifier}"
+        resp: requests.Response = client.session.delete(url)
+        requests_raise_for_status(resp)
+        return resp if return_resp_obj else None
+
+    def copy(
+        self,
+        catalog_to: str,
+        catalog_from: str | None = None,
+        client: Fusion | None = None,
+        client_to: Fusion | None = None,
+        return_resp_obj: bool = False,
+    ) -> requests.Response | None:
+        """Copy product from one Fusion catalog and/or environment to another by copy.
+
+        Args:
+            catalog_to (str): Catalog identifier to which to copy product.
+            catalog_from (str, optional): A catalog identifier from which to copy product. Defaults to "common".
+            client (Fusion): A Fusion client object. Defaults to the instance's _client.
+                If instantiated from a Fusion object, then the client is set automatically.
+            client_to (Fusion | None, optional): Fusion client object. Defaults to current instance.
+            return_resp_obj (bool, optional): If True then return the response object. Defaults to False.
+
+        Returns:
+            requests.Response | None: The response object from the API call if return_resp_obj is True, otherwise None.
+
+        Examples:
+
+            >>> from fusion import Fusion
+            >>> fusion = Fusion()
+            >>> fusion.product("my_product").copy(catalog_from="my_catalog", catalog_to="my_new_catalog")
+
+        """
+        client = self._use_client(client)
+        catalog_from = client._use_catalog(catalog_from)
+        if client_to is None:
+            client_to = client
+        product_obj = self.from_catalog(catalog=catalog_from, client=client)
+        product_obj.client = client_to
+        resp = product_obj.create(catalog=catalog_to, return_resp_obj=True)
+        return resp if return_resp_obj else None
+
+
+ + + +
+ + + + + + + +
+ + + +

+ client: Fusion | None + + + property + writable + + +

+ + +
+ +

Return the client.

+
+ +
+ + + +
+ + +

+ __post_init__() + +

+ + +
+ +

Format Product metadata fields after object instantiation.

+ +
+ Source code in py_src/fusion/product.py +
 91
+ 92
+ 93
+ 94
+ 95
+ 96
+ 97
+ 98
+ 99
+100
+101
+102
+103
+104
+105
+106
+107
+108
+109
+110
+111
+112
+113
+114
+115
+116
+117
+118
+119
+120
+121
+122
+123
+124
+125
def __post_init__(self: Product) -> None:
+    """Format Product metadata fields after object instantiation."""
+    self.identifier = tidy_string(self.identifier).upper().replace(" ", "_")
+    self.title = tidy_string(self.title) if self.title != "" else self.identifier.replace("_", " ").title()
+    self.description = tidy_string(self.description) if self.description != "" else self.title
+    self.short_abstract = tidy_string(self.short_abstract) if self.short_abstract != "" else self.title
+    self.description = tidy_string(self.description)
+    self.category = (
+        self.category if isinstance(self.category, list) or self.category is None else make_list(self.category)
+    )
+    self.tag = self.tag if isinstance(self.tag, list) or self.tag is None else make_list(self.tag)
+    self.dataset = (
+        self.dataset if isinstance(self.dataset, list) or self.dataset is None else make_list(self.dataset)
+    )
+    self.sub_category = (
+        self.sub_category
+        if isinstance(self.sub_category, list) or self.sub_category is None
+        else make_list(self.sub_category)
+    )
+    self.is_active = self.is_active if isinstance(self.is_active, bool) else make_bool(self.is_active)
+    self.is_restricted = (
+        self.is_restricted
+        if isinstance(self.is_restricted, bool) or self.is_restricted is None
+        else make_bool(self.is_restricted)
+    )
+    self.maintainer = (
+        self.maintainer
+        if isinstance(self.maintainer, list) or self.maintainer is None
+        else make_list(self.maintainer)
+    )
+    self.region = self.region if isinstance(self.region, list) or self.region is None else make_list(self.region)
+    self.delivery_channel = (
+        self.delivery_channel if isinstance(self.delivery_channel, list) else make_list(self.delivery_channel)
+    )
+    self.release_date = convert_date_format(self.release_date) if self.release_date else None
+
+
+
+ +
+ +
+ + +

+ __repr__() + +

+ + +
+ +

Return an object representation of the Product object.

+ + +

Returns:

+ + + + + + + + + + + + + +
Name TypeDescription
str + str + +
+

Object representaiton of the product.

+
+
+ +
+ Source code in py_src/fusion/product.py +
81
+82
+83
+84
+85
+86
+87
+88
+89
def __repr__(self: Product) -> str:
+    """Return an object representation of the Product object.
+
+    Returns:
+        str: Object representaiton of the product.
+
+    """
+    attrs = {k: v for k, v in self.__dict__.items() if not k.startswith("_")}
+    return f"Product(\n" + ",\n ".join(f"{k}={v!r}" for k, v in attrs.items()) + "\n)"
+
+
+
+ +
+ +
+ + +

+ copy(catalog_to, catalog_from=None, client=None, client_to=None, return_resp_obj=False) + +

+ + +
+ +

Copy product from one Fusion catalog and/or environment to another by copy.

+ + +

Parameters:

+ + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + +
NameTypeDescriptionDefault
catalog_to + str + +
+

Catalog identifier to which to copy product.

+
+
+ required +
catalog_from + str + +
+

A catalog identifier from which to copy product. Defaults to "common".

+
+
+ None +
client + Fusion + +
+

A Fusion client object. Defaults to the instance's _client. +If instantiated from a Fusion object, then the client is set automatically.

+
+
+ None +
client_to + Fusion | None + +
+

Fusion client object. Defaults to current instance.

+
+
+ None +
return_resp_obj + bool + +
+

If True then return the response object. Defaults to False.

+
+
+ False +
+ + +

Returns:

+ + + + + + + + + + + + + +
TypeDescription
+ Response | None + +
+

requests.Response | None: The response object from the API call if return_resp_obj is True, otherwise None.

+
+
+
>>> from fusion import Fusion
+>>> fusion = Fusion()
+>>> fusion.product("my_product").copy(catalog_from="my_catalog", catalog_to="my_new_catalog")
+
+ +
+ Source code in py_src/fusion/product.py +
585
+586
+587
+588
+589
+590
+591
+592
+593
+594
+595
+596
+597
+598
+599
+600
+601
+602
+603
+604
+605
+606
+607
+608
+609
+610
+611
+612
+613
+614
+615
+616
+617
+618
+619
+620
def copy(
+    self,
+    catalog_to: str,
+    catalog_from: str | None = None,
+    client: Fusion | None = None,
+    client_to: Fusion | None = None,
+    return_resp_obj: bool = False,
+) -> requests.Response | None:
+    """Copy product from one Fusion catalog and/or environment to another by copy.
+
+    Args:
+        catalog_to (str): Catalog identifier to which to copy product.
+        catalog_from (str, optional): A catalog identifier from which to copy product. Defaults to "common".
+        client (Fusion): A Fusion client object. Defaults to the instance's _client.
+            If instantiated from a Fusion object, then the client is set automatically.
+        client_to (Fusion | None, optional): Fusion client object. Defaults to current instance.
+        return_resp_obj (bool, optional): If True then return the response object. Defaults to False.
+
+    Returns:
+        requests.Response | None: The response object from the API call if return_resp_obj is True, otherwise None.
+
+    Examples:
+
+        >>> from fusion import Fusion
+        >>> fusion = Fusion()
+        >>> fusion.product("my_product").copy(catalog_from="my_catalog", catalog_to="my_new_catalog")
+
+    """
+    client = self._use_client(client)
+    catalog_from = client._use_catalog(catalog_from)
+    if client_to is None:
+        client_to = client
+    product_obj = self.from_catalog(catalog=catalog_from, client=client)
+    product_obj.client = client_to
+    resp = product_obj.create(catalog=catalog_to, return_resp_obj=True)
+    return resp if return_resp_obj else None
+
+
+
+ +
+ +
+ + +

+ create(catalog=None, client=None, return_resp_obj=False) + +

+ + +
+ +

Upload a new product to a Fusion catalog.

+ + +

Parameters:

+ + + + + + + + + + + + + + + + + + + + + + + + + + + + + +
NameTypeDescriptionDefault
client + Fusion + +
+

A Fusion client object. Defaults to the instance's _client. +If instantiated from a Fusion object, then the client is set automatically.

+
+
+ None +
catalog + str + +
+

A catalog identifier. Defaults to None.

+
+
+ None +
return_resp_obj + bool + +
+

If True then return the response object. Defaults to False.

+
+
+ False +
+ + +

Returns:

+ + + + + + + + + + + + + +
TypeDescription
+ Response | None + +
+

requests.Response | None: The response object from the API call if return_resp_obj is True, otherwise None.

+
+
+
From scratch:
+
+>>> from fusion import Fusion
+>>> fusion = Fusion()
+>>> product = fusion.product(
+...     identifer="my_product"
+...     title="My Product",
+...     category="Data",
+...     short_abstract="My product is awesome",
+...     description="My product is very awesome",
+...     )
+>>> product.create(catalog="my_catalog")
+
+From a dictionary:
+
+>>> product_dict = {
+...     "identifier": "my_product",
+...     "title": "My Product",
+...     "category": "Data"
+...     }
+>>> product = fusion.product("my_product").from_object(product_dict)
+>>> product.create(catalog="my_catalog")
+
+From a JSON string:
+
+>>> from fusion import Fusion
+>>> fusion = Fusion()
+>>> product_json = '{
+...     "identifier": "my_product",
+...     "title": "My Product",
+...     "category": "Data"
+...     }'
+>>> product = fusion.product("my_product").from_object(product_json)
+>>> product.create(catalog="my_catalog")
+
+From a CSV file:
+
+>>> from fusion import Fusion
+>>> fusion = Fusion()
+>>> product = fusion.product("my_product").from_object("path/to/product.csv")
+>>> product.create(catalog="my_catalog")
+
+From a pandas Series:
+
+>>> from fusion import Fusion
+>>> fusion = Fusion()
+>>> product_series = pd.Series({
+...     "identifier": "my_product",
+...     "title": "My Product",
+...     "category": "Data"
+...     })
+>>> product = fusion.product("my_product").from_object(product_series)
+>>> product.create(catalog="my_catalog")
+
+From existing product in a catalog:
+
+>>> from fusion import Fusion
+>>> fusion = Fusion()
+>>> product = fusion.product("my_product").from_catalog()
+>>> product.identifier = "my_new_product"
+>>> product.create(catalog="my_catalog")
+
+ +
+ Source code in py_src/fusion/product.py +
413
+414
+415
+416
+417
+418
+419
+420
+421
+422
+423
+424
+425
+426
+427
+428
+429
+430
+431
+432
+433
+434
+435
+436
+437
+438
+439
+440
+441
+442
+443
+444
+445
+446
+447
+448
+449
+450
+451
+452
+453
+454
+455
+456
+457
+458
+459
+460
+461
+462
+463
+464
+465
+466
+467
+468
+469
+470
+471
+472
+473
+474
+475
+476
+477
+478
+479
+480
+481
+482
+483
+484
+485
+486
+487
+488
+489
+490
+491
+492
+493
+494
+495
+496
+497
+498
+499
+500
+501
+502
+503
+504
+505
+506
+507
+508
+509
def create(
+    self,
+    catalog: str | None = None,
+    client: Fusion | None = None,
+    return_resp_obj: bool = False,
+) -> requests.Response | None:
+    """Upload a new product to a Fusion catalog.
+
+    Args:
+        client (Fusion, optional): A Fusion client object. Defaults to the instance's _client.
+            If instantiated from a Fusion object, then the client is set automatically.
+        catalog (str, optional): A catalog identifier. Defaults to None.
+        return_resp_obj (bool, optional): If True then return the response object. Defaults to False.
+
+    Returns:
+        requests.Response | None: The response object from the API call if return_resp_obj is True, otherwise None.
+
+    Examples:
+
+        From scratch:
+
+        >>> from fusion import Fusion
+        >>> fusion = Fusion()
+        >>> product = fusion.product(
+        ...     identifer="my_product"
+        ...     title="My Product",
+        ...     category="Data",
+        ...     short_abstract="My product is awesome",
+        ...     description="My product is very awesome",
+        ...     )
+        >>> product.create(catalog="my_catalog")
+
+        From a dictionary:
+
+        >>> product_dict = {
+        ...     "identifier": "my_product",
+        ...     "title": "My Product",
+        ...     "category": "Data"
+        ...     }
+        >>> product = fusion.product("my_product").from_object(product_dict)
+        >>> product.create(catalog="my_catalog")
+
+        From a JSON string:
+
+        >>> from fusion import Fusion
+        >>> fusion = Fusion()
+        >>> product_json = '{
+        ...     "identifier": "my_product",
+        ...     "title": "My Product",
+        ...     "category": "Data"
+        ...     }'
+        >>> product = fusion.product("my_product").from_object(product_json)
+        >>> product.create(catalog="my_catalog")
+
+        From a CSV file:
+
+        >>> from fusion import Fusion
+        >>> fusion = Fusion()
+        >>> product = fusion.product("my_product").from_object("path/to/product.csv")
+        >>> product.create(catalog="my_catalog")
+
+        From a pandas Series:
+
+        >>> from fusion import Fusion
+        >>> fusion = Fusion()
+        >>> product_series = pd.Series({
+        ...     "identifier": "my_product",
+        ...     "title": "My Product",
+        ...     "category": "Data"
+        ...     })
+        >>> product = fusion.product("my_product").from_object(product_series)
+        >>> product.create(catalog="my_catalog")
+
+        From existing product in a catalog:
+
+        >>> from fusion import Fusion
+        >>> fusion = Fusion()
+        >>> product = fusion.product("my_product").from_catalog()
+        >>> product.identifier = "my_new_product"
+        >>> product.create(catalog="my_catalog")
+
+    """
+    client = self._use_client(client)
+    catalog = client._use_catalog(catalog)
+
+    release_date = self.release_date if self.release_date else pd.Timestamp("today").strftime("%Y-%m-%d")
+    delivery_channel = self.delivery_channel if self.delivery_channel else ["API"]
+
+    self.release_date = release_date
+    self.delivery_channel = delivery_channel
+
+    data = self.to_dict()
+
+    url = f"{client.root_url}catalogs/{catalog}/products/{self.identifier}"
+    resp: requests.Response = client.session.post(url, json=data)
+    requests_raise_for_status(resp)
+    return resp if return_resp_obj else None
+
+
+
+ +
+ +
+ + +

+ delete(catalog=None, client=None, return_resp_obj=False) + +

+ + +
+ +

Delete a product from a Fusion catalog.

+ + +

Parameters:

+ + + + + + + + + + + + + + + + + + + + + + + + + + + + + +
NameTypeDescriptionDefault
client + Fusion + +
+

A Fusion client object. Defaults to the instance's _client. +If instantiated from a Fusion object, then the client is set automatically.

+
+
+ None +
catalog + str + +
+

A catalog identifier. Defaults to None.

+
+
+ None +
return_resp_obj + bool + +
+

If True then return the response object. Defaults to False.

+
+
+ False +
+ + +

Returns:

+ + + + + + + + + + + + + +
TypeDescription
+ Response | None + +
+

requests.Response | None: The response object from the API call if return_resp_obj is True, otherwise None.

+
+
+
>>> from fusion import Fusion
+>>> fusion = Fusion()
+>>> fusion.product("my_product").delete(catalog="my_catalog")
+
+ +
+ Source code in py_src/fusion/product.py +
553
+554
+555
+556
+557
+558
+559
+560
+561
+562
+563
+564
+565
+566
+567
+568
+569
+570
+571
+572
+573
+574
+575
+576
+577
+578
+579
+580
+581
+582
+583
def delete(
+    self,
+    catalog: str | None = None,
+    client: Fusion | None = None,
+    return_resp_obj: bool = False,
+) -> requests.Response | None:
+    """Delete a product from a Fusion catalog.
+
+    Args:
+        client (Fusion): A Fusion client object. Defaults to the instance's _client.
+            If instantiated from a Fusion object, then the client is set automatically.
+        catalog (str, optional): A catalog identifier. Defaults to None.
+        return_resp_obj (bool, optional): If True then return the response object. Defaults to False.
+
+    Returns:
+        requests.Response | None: The response object from the API call if return_resp_obj is True, otherwise None.
+
+     Examples:
+
+        >>> from fusion import Fusion
+        >>> fusion = Fusion()
+        >>> fusion.product("my_product").delete(catalog="my_catalog")
+
+    """
+    client = self._use_client(client)
+    catalog = client._use_catalog(catalog)
+
+    url = f"{client.root_url}catalogs/{catalog}/products/{self.identifier}"
+    resp: requests.Response = client.session.delete(url)
+    requests_raise_for_status(resp)
+    return resp if return_resp_obj else None
+
+
+
+ +
+ +
+ + +

+ from_catalog(catalog=None, client=None) + +

+ + +
+ +

Instantiate a Product object from a Fusion catalog.

+ + +

Parameters:

+ + + + + + + + + + + + + + + + + + + + + + + +
NameTypeDescriptionDefault
catalog + str | None + +
+

Catalog identifer. Defaults to None.

+
+
+ None +
client + Fusion | None + +
+

Fusion session. Defaults to None. +If instantiated from a Fusion object, then the client is set automatically.

+
+
+ None +
+ + +

Returns:

+ + + + + + + + + + + + + +
Name TypeDescription
Product + Product + +
+

Product object.

+
+
+ + +

Examples:

+
>>> from fusion import Fusion
+>>> fusion = Fusion()
+>>> product = fusion.product("my_product").from_catalog(catalog="my_catalog")
+
+ +
+ Source code in py_src/fusion/product.py +
364
+365
+366
+367
+368
+369
+370
+371
+372
+373
+374
+375
+376
+377
+378
+379
+380
+381
+382
+383
+384
+385
+386
+387
+388
+389
+390
+391
def from_catalog(self, catalog: str | None = None, client: Fusion | None = None) -> Product:
+    """Instantiate a Product object from a Fusion catalog.
+
+    Args:
+        catalog (str | None, optional): Catalog identifer. Defaults to None.
+        client (Fusion | None, optional): Fusion session. Defaults to None.
+            If instantiated from a Fusion object, then the client is set automatically.
+
+    Returns:
+        Product: Product object.
+
+    Examples:
+        >>> from fusion import Fusion
+        >>> fusion = Fusion()
+        >>> product = fusion.product("my_product").from_catalog(catalog="my_catalog")
+
+    """
+    client = self._use_client(client)
+    catalog = client._use_catalog(catalog)
+
+    resp = client.session.get(f"{client.root_url}catalogs/{catalog}/products")
+    requests_raise_for_status(resp)
+    list_products = resp.json()["resources"]
+    dict_ = [dict_ for dict_ in list_products if dict_["identifier"] == self.identifier][0]
+    product_obj = Product._from_dict(dict_)
+    product_obj.client = client
+
+    return product_obj
+
+
+
+ +
+ +
+ + +

+ from_object(product_source) + +

+ + +
+ +

Instantiate a Product object from a Product object, dictionary, path to CSV, JSON string, or pandas Series.

+ + +

Parameters:

+ + + + + + + + + + + + + + + + + +
NameTypeDescriptionDefault
product_source + Product | dict[str, Any] | str | Series[Any] + +
+

Product metadata source.

+
+
+ required +
+ + +

Raises:

+ + + + + + + + + + + + + +
TypeDescription
+ TypeError + +
+

If the object provided is not a Product, dictionary, path to CSV file, JSON string,

+
+
+ + +

Returns:

+ + + + + + + + + + + + + +
Name TypeDescription
Product + Product + +
+

Product object.

+
+
+ + +

Examples:

+

Instantiating a Product object from a dictionary:

+
>>> from fusion import Fusion
+>>> from fusion.product import Product
+>>> fusion = Fusion()
+>>> product_dict = {
+...     "identifier": "my_product",
+...     "title": "My Product",
+...     "category": "Data",
+...     "short_abstract": "My product is awesome",
+...     "description": "My product is very awesome",
+...     "is_active": True,
+...     "is_restricted": False,
+...     "maintainer": "My Company",
+...     "region": "Global",
+...     "publisher": "My Company",
+...     "sub_category": "Data",
+...     "tag": "My Company",
+...     "delivery_channel": "API",
+...     "theme": "Data",
+...     "release_date": "2021-01-01",
+...     "language": "English",
+...     "status": "Available"
+... }
+>>> product = fusion.product("my_product").from_object(product_dict)
+
+

Instantiating a Product object from a JSON string:

+
>>> from fusion import Fusion
+>>> from fusion.product import Product
+>>> fusion = Fusion()
+>>> product_json = '{
+...     "identifier": "my_product",
+...     "title": "My Product",
+...     "category": "Data",
+...     "short_abstract": "My product is awesome",
+...     "description": "My product is very awesome",
+...     "is_active": True,
+...     "is_restricted": False,
+...     "maintainer": "My Company",
+...     "region": "Global",
+...     "publisher": "My Company",
+...     "sub_category": "Data",
+...     "tag": "My Company",
+...     "delivery_channel": "API",
+...     "theme": "Data",
+...     "release_date": "2021-01-01",
+...     "language": "English",
+...     "status": "Available",
+... }'
+>>> product = fusion.product("my_product").from_object(product_json)
+
+

Instantiating a Product object from a CSV file:

+
>>> from fusion import Fusion
+>>> from fusion.product import Product
+>>> fusion = Fusion()
+>>> product = fusion.product("my_product").from_object("path/to/product.csv")
+
+

Instantiating a Product object from a pandas Series:

+
>>> from fusion import Fusion
+>>> from fusion.product import Product
+>>> fusion = Fusion()
+>>> product_series = pd.Series({
+...     "identifier": "my_product",
+...     "title": "My Product",
+...     "category": "Data",
+...     "short_abstract": "My product is awesome",
+...     "description": "My product is very awesome",
+...     "is_active": True,
+...     "is_restricted": False,
+...     "maintainer": "My Company",
+...     "region": "Global",
+...     "publisher": "My Company",
+...     "sub_category": "Data",
+...     "tag": "My Company",
+...     "delivery_channel": "API",
+...     "theme": "Data",
+...     "release_date": "2021-01-01",
+...     "language": "English",
+...     "status": "Available",
+... })
+>>> product = fusion.product("my_product").from_object(product_series)
+
+ +
+ Source code in py_src/fusion/product.py +
245
+246
+247
+248
+249
+250
+251
+252
+253
+254
+255
+256
+257
+258
+259
+260
+261
+262
+263
+264
+265
+266
+267
+268
+269
+270
+271
+272
+273
+274
+275
+276
+277
+278
+279
+280
+281
+282
+283
+284
+285
+286
+287
+288
+289
+290
+291
+292
+293
+294
+295
+296
+297
+298
+299
+300
+301
+302
+303
+304
+305
+306
+307
+308
+309
+310
+311
+312
+313
+314
+315
+316
+317
+318
+319
+320
+321
+322
+323
+324
+325
+326
+327
+328
+329
+330
+331
+332
+333
+334
+335
+336
+337
+338
+339
+340
+341
+342
+343
+344
+345
+346
+347
+348
+349
+350
+351
+352
+353
+354
+355
+356
+357
+358
+359
+360
+361
+362
def from_object(
+    self,
+    product_source: Product | dict[str, Any] | str | pd.Series[Any],
+) -> Product:
+    """Instantiate a Product object from a Product object, dictionary, path to CSV, JSON string, or pandas Series.
+
+    Args:
+        product_source (Product | dict[str, Any] | str | pd.Series[Any]): Product metadata source.
+
+    Raises:
+        TypeError: If the object provided is not a Product, dictionary, path to CSV file, JSON string,
+        or pandas Series.
+
+    Returns:
+        Product: Product object.
+
+    Examples:
+        Instantiating a Product object from a dictionary:
+
+        >>> from fusion import Fusion
+        >>> from fusion.product import Product
+        >>> fusion = Fusion()
+        >>> product_dict = {
+        ...     "identifier": "my_product",
+        ...     "title": "My Product",
+        ...     "category": "Data",
+        ...     "short_abstract": "My product is awesome",
+        ...     "description": "My product is very awesome",
+        ...     "is_active": True,
+        ...     "is_restricted": False,
+        ...     "maintainer": "My Company",
+        ...     "region": "Global",
+        ...     "publisher": "My Company",
+        ...     "sub_category": "Data",
+        ...     "tag": "My Company",
+        ...     "delivery_channel": "API",
+        ...     "theme": "Data",
+        ...     "release_date": "2021-01-01",
+        ...     "language": "English",
+        ...     "status": "Available"
+        ... }
+        >>> product = fusion.product("my_product").from_object(product_dict)
+
+        Instantiating a Product object from a JSON string:
+
+        >>> from fusion import Fusion
+        >>> from fusion.product import Product
+        >>> fusion = Fusion()
+        >>> product_json = '{
+        ...     "identifier": "my_product",
+        ...     "title": "My Product",
+        ...     "category": "Data",
+        ...     "short_abstract": "My product is awesome",
+        ...     "description": "My product is very awesome",
+        ...     "is_active": True,
+        ...     "is_restricted": False,
+        ...     "maintainer": "My Company",
+        ...     "region": "Global",
+        ...     "publisher": "My Company",
+        ...     "sub_category": "Data",
+        ...     "tag": "My Company",
+        ...     "delivery_channel": "API",
+        ...     "theme": "Data",
+        ...     "release_date": "2021-01-01",
+        ...     "language": "English",
+        ...     "status": "Available",
+        ... }'
+        >>> product = fusion.product("my_product").from_object(product_json)
+
+        Instantiating a Product object from a CSV file:
+
+        >>> from fusion import Fusion
+        >>> from fusion.product import Product
+        >>> fusion = Fusion()
+        >>> product = fusion.product("my_product").from_object("path/to/product.csv")
+
+        Instantiating a Product object from a pandas Series:
+
+        >>> from fusion import Fusion
+        >>> from fusion.product import Product
+        >>> fusion = Fusion()
+        >>> product_series = pd.Series({
+        ...     "identifier": "my_product",
+        ...     "title": "My Product",
+        ...     "category": "Data",
+        ...     "short_abstract": "My product is awesome",
+        ...     "description": "My product is very awesome",
+        ...     "is_active": True,
+        ...     "is_restricted": False,
+        ...     "maintainer": "My Company",
+        ...     "region": "Global",
+        ...     "publisher": "My Company",
+        ...     "sub_category": "Data",
+        ...     "tag": "My Company",
+        ...     "delivery_channel": "API",
+        ...     "theme": "Data",
+        ...     "release_date": "2021-01-01",
+        ...     "language": "English",
+        ...     "status": "Available",
+        ... })
+        >>> product = fusion.product("my_product").from_object(product_series)
+
+    """
+    if isinstance(product_source, Product):
+        product = product_source
+    elif isinstance(product_source, dict):
+        product = Product._from_dict(product_source)
+    elif isinstance(product_source, str):
+        if _is_json(product_source):
+            product = Product._from_dict(js.loads(product_source))
+        else:
+            product = Product._from_csv(product_source)
+    elif isinstance(product_source, pd.Series):
+        product = Product._from_series(product_source)
+    else:
+        raise TypeError(f"Could not resolve the object provided: {product_source}")
+    product.client = self._client
+    return product
+
+
+
+ +
+ +
+ + +

+ to_dict() + +

+ + +
+ +

Convert the Product instance to a dictionary.

+ + +

Returns:

+ + + + + + + + + + + + + +
TypeDescription
+ dict[str, Any] + +
+

dict[str, Any]: Product metadata as a dictionary.

+
+
+ + +

Examples:

+
>>> from fusion import Fusion
+>>> fusion = Fusion()
+>>> product = fusion.product("my_product")
+>>> product_dict = product.to_dict()
+
+ +
+ Source code in py_src/fusion/product.py +
393
+394
+395
+396
+397
+398
+399
+400
+401
+402
+403
+404
+405
+406
+407
+408
+409
+410
+411
def to_dict(self: Product) -> dict[str, Any]:
+    """Convert the Product instance to a dictionary.
+
+    Returns:
+        dict[str, Any]: Product metadata as a dictionary.
+
+    Examples:
+        >>> from fusion import Fusion
+        >>> fusion = Fusion()
+        >>> product = fusion.product("my_product")
+        >>> product_dict = product.to_dict()
+
+    """
+    product_dict = {
+        snake_to_camel(k): v
+        for k, v in self.__dict__.items()
+        if not k.startswith("_")
+    }
+    return product_dict
+
+
+
+ +
+ +
+ + +

+ update(catalog=None, client=None, return_resp_obj=False) + +

+ + +
+ +

Update an existing product in a Fusion catalog.

+ + +

Parameters:

+ + + + + + + + + + + + + + + + + + + + + + + + + + + + + +
NameTypeDescriptionDefault
client + Fusion + +
+

A Fusion client object. Defaults to the instance's _client. +If instantiated from a Fusion object, then the client is set automatically.

+
+
+ None +
catalog + str + +
+

A catalog identifier. Defaults to None.

+
+
+ None +
return_resp_obj + bool + +
+

If True then return the response object. Defaults to False.

+
+
+ False +
+ + +

Returns:

+ + + + + + + + + + + + + +
TypeDescription
+ Response | None + +
+

requests.Response | None: The response object from the API call if return_resp_obj is True, otherwise None.

+
+
+
>>> from fusion import Fusion
+>>> fusion = Fusion()
+>>> product = fusion.product("my_product").from_catalog(catalog="my_catalog")
+>>> product.title = "My Updated Product Title"
+>>> product.update(catalog="my_catalog")
+
+ +
+ Source code in py_src/fusion/product.py +
511
+512
+513
+514
+515
+516
+517
+518
+519
+520
+521
+522
+523
+524
+525
+526
+527
+528
+529
+530
+531
+532
+533
+534
+535
+536
+537
+538
+539
+540
+541
+542
+543
+544
+545
+546
+547
+548
+549
+550
+551
def update(
+    self,
+    catalog: str | None = None,
+    client: Fusion | None = None,
+    return_resp_obj: bool = False,
+) -> requests.Response | None:
+    """Update an existing product in a Fusion catalog.
+
+    Args:
+        client (Fusion): A Fusion client object. Defaults to the instance's _client.
+            If instantiated from a Fusion object, then the client is set automatically.
+        catalog (str, optional): A catalog identifier. Defaults to None.
+        return_resp_obj (bool, optional): If True then return the response object. Defaults to False.
+
+    Returns:
+        requests.Response | None: The response object from the API call if return_resp_obj is True, otherwise None.
+
+    Examples:
+
+        >>> from fusion import Fusion
+        >>> fusion = Fusion()
+        >>> product = fusion.product("my_product").from_catalog(catalog="my_catalog")
+        >>> product.title = "My Updated Product Title"
+        >>> product.update(catalog="my_catalog")
+
+    """
+    client = self._use_client(client)
+    catalog = client._use_catalog(catalog)
+
+    release_date = self.release_date if self.release_date else pd.Timestamp("today").strftime("%Y-%m-%d")
+    delivery_channel = self.delivery_channel if self.delivery_channel else ["API"]
+
+    self.release_date = release_date
+    self.delivery_channel = delivery_channel
+
+    data = self.to_dict()
+
+    url = f"{client.root_url}catalogs/{catalog}/products/{self.identifier}"
+    resp: requests.Response = client.session.put(url, json=data)
+    requests_raise_for_status(resp)
+    return resp if return_resp_obj else None
+
+
+
+ +
+ + + +
+ +
+ +
+ + + + +
+ +
+ +
+ +
+ + + + +
+ +

Fusion Dataset class and functions.

+ + + +
+ + + + + + + + +
+ + + +

+ Dataset + + + + dataclass + + +

+ + +
+ + +

Fusion Dataset class for managing dataset metadata in a Fusion catalog.

+ + +

Attributes:

+ + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + +
NameTypeDescription
identifier + str + +
+

A unique identifier for the dataset.

+
+
title + str + +
+

A title for the dataset. If not provided, defaults to identifier.

+
+
category + str | list[str] | None + +
+

A category or list of categories for the dataset. Defaults to None.

+
+
description + str + +
+

A description of the dataset. If not provided, defaults to identifier.

+
+
frequency + str + +
+

The frequency of the dataset. Defaults to "Once".

+
+
is_internal_only_dataset + bool + +
+

Flag for internal datasets. Defaults to False.

+
+
is_third_party_data + bool + +
+

Flag for third party data. Defaults to True.

+
+
is_restricted + bool | None + +
+

Flag for restricted datasets. Defaults to None.

+
+
is_raw_data + bool + +
+

Flag for raw datasets. Defaults to True.

+
+
maintainer + str | None + +
+

Dataset maintainer. Defaults to "J.P. Morgan Fusion".

+
+
source + str | list[str] | None + +
+

Name of data vendor which provided the data. Defaults to None.

+
+
region + str | list[str] | None + +
+

Region. Defaults to None.

+
+
publisher + str + +
+

Name of vendor that publishes the data. Defaults to "J.P. Morgan".

+
+
product + str | list[str] | None + +
+

Product to associate dataset with. Defaults to None.

+
+
sub_category + str | list[str] | None + +
+

Sub-category. Defaults to None.

+
+
tags + str | list[str] | None + +
+

Tags used for search purposes. Defaults to None.

+
+
created_date + str | None + +
+

Created date. Defaults to None.

+
+
modified_date + str | None + +
+

Modified date. Defaults to None.

+
+
delivery_channel + str | list[str] + +
+

Delivery channel. Defaults to "API".

+
+
language + str + +
+

Language. Defaults to "English".

+
+
status + str + +
+

Status. Defaults to "Available".

+
+
type_ + str | None + +
+

Dataset type. Defaults to "Source".

+
+
container_type + str | None + +
+

Container type. Defaults to "Snapshot-Full".

+
+
snowflake + str | None + +
+

Snowflake account connection. Defaults to None.

+
+
complexity + str | None + +
+

Complexity. Defaults to None.

+
+
is_immutable + bool | None + +
+

Flag for immutable datasets. Defaults to None.

+
+
is_mnpi + bool | None + +
+

is_mnpi. Defaults to None.

+
+
is_pci + bool | None + +
+

is_pci. Defaults to None.

+
+
is_pii + bool | None + +
+

is_pii. Defaults to None.

+
+
is_client + bool | None + +
+

is_client. Defaults to None.

+
+
is_public + bool | None + +
+

is_public. Defaults to None.

+
+
is_internal + bool | None + +
+

is_internal. Defaults to None.

+
+
is_confidential + bool | None + +
+

is_confidential. Defaults to None.

+
+
is_highly_confidential + bool | None + +
+

is_highly_confidential. Defaults to None.

+
+
is_active + bool | None + +
+

is_active. Defaults to None.

+
+
owners + list[str] | None + +
+

The owners of the dataset. Defaults to None.

+
+
application_id + str | dict[str, str] | None + +
+

The application (most commonly seal ID) that the +dataset/report/flow is owned by. Accepts string format for seal IDs, or a dictionary containing 'id' and +'type' as keys. Defaults to None.

+
+
_client + Any + +
+

A Fusion client object. Defaults to None.

+
+
+ +
+ Source code in py_src/fusion/dataset.py +
 29
+ 30
+ 31
+ 32
+ 33
+ 34
+ 35
+ 36
+ 37
+ 38
+ 39
+ 40
+ 41
+ 42
+ 43
+ 44
+ 45
+ 46
+ 47
+ 48
+ 49
+ 50
+ 51
+ 52
+ 53
+ 54
+ 55
+ 56
+ 57
+ 58
+ 59
+ 60
+ 61
+ 62
+ 63
+ 64
+ 65
+ 66
+ 67
+ 68
+ 69
+ 70
+ 71
+ 72
+ 73
+ 74
+ 75
+ 76
+ 77
+ 78
+ 79
+ 80
+ 81
+ 82
+ 83
+ 84
+ 85
+ 86
+ 87
+ 88
+ 89
+ 90
+ 91
+ 92
+ 93
+ 94
+ 95
+ 96
+ 97
+ 98
+ 99
+100
+101
+102
+103
+104
+105
+106
+107
+108
+109
+110
+111
+112
+113
+114
+115
+116
+117
+118
+119
+120
+121
+122
+123
+124
+125
+126
+127
+128
+129
+130
+131
+132
+133
+134
+135
+136
+137
+138
+139
+140
+141
+142
+143
+144
+145
+146
+147
+148
+149
+150
+151
+152
+153
+154
+155
+156
+157
+158
+159
+160
+161
+162
+163
+164
+165
+166
+167
+168
+169
+170
+171
+172
+173
+174
+175
+176
+177
+178
+179
+180
+181
+182
+183
+184
+185
+186
+187
+188
+189
+190
+191
+192
+193
+194
+195
+196
+197
+198
+199
+200
+201
+202
+203
+204
+205
+206
+207
+208
+209
+210
+211
+212
+213
+214
+215
+216
+217
+218
+219
+220
+221
+222
+223
+224
+225
+226
+227
+228
+229
+230
+231
+232
+233
+234
+235
+236
+237
+238
+239
+240
+241
+242
+243
+244
+245
+246
+247
+248
+249
+250
+251
+252
+253
+254
+255
+256
+257
+258
+259
+260
+261
+262
+263
+264
+265
+266
+267
+268
+269
+270
+271
+272
+273
+274
+275
+276
+277
+278
+279
+280
+281
+282
+283
+284
+285
+286
+287
+288
+289
+290
+291
+292
+293
+294
+295
+296
+297
+298
+299
+300
+301
+302
+303
+304
+305
+306
+307
+308
+309
+310
+311
+312
+313
+314
+315
+316
+317
+318
+319
+320
+321
+322
+323
+324
+325
+326
+327
+328
+329
+330
+331
+332
+333
+334
+335
+336
+337
+338
+339
+340
+341
+342
+343
+344
+345
+346
+347
+348
+349
+350
+351
+352
+353
+354
+355
+356
+357
+358
+359
+360
+361
+362
+363
+364
+365
+366
+367
+368
+369
+370
+371
+372
+373
+374
+375
+376
+377
+378
+379
+380
+381
+382
+383
+384
+385
+386
+387
+388
+389
+390
+391
+392
+393
+394
+395
+396
+397
+398
+399
+400
+401
+402
+403
+404
+405
+406
+407
+408
+409
+410
+411
+412
+413
+414
+415
+416
+417
+418
+419
+420
+421
+422
+423
+424
+425
+426
+427
+428
+429
+430
+431
+432
+433
+434
+435
+436
+437
+438
+439
+440
+441
+442
+443
+444
+445
+446
+447
+448
+449
+450
+451
+452
+453
+454
+455
+456
+457
+458
+459
+460
+461
+462
+463
+464
+465
+466
+467
+468
+469
+470
+471
+472
+473
+474
+475
+476
+477
+478
+479
+480
+481
+482
+483
+484
+485
+486
+487
+488
+489
+490
+491
+492
+493
+494
+495
+496
+497
+498
+499
+500
+501
+502
+503
+504
+505
+506
+507
+508
+509
+510
+511
+512
+513
+514
+515
+516
+517
+518
+519
+520
+521
+522
+523
+524
+525
+526
+527
+528
+529
+530
+531
+532
+533
+534
+535
+536
+537
+538
+539
+540
+541
+542
+543
+544
+545
+546
+547
+548
+549
+550
+551
+552
+553
+554
+555
+556
+557
+558
+559
+560
+561
+562
+563
+564
+565
+566
+567
+568
+569
+570
+571
+572
+573
+574
+575
+576
+577
+578
+579
+580
+581
+582
+583
+584
+585
+586
+587
+588
+589
+590
+591
+592
+593
+594
+595
+596
+597
+598
+599
+600
+601
+602
+603
+604
+605
+606
+607
+608
+609
+610
+611
+612
+613
+614
+615
+616
+617
+618
+619
+620
+621
+622
+623
+624
+625
+626
+627
+628
+629
+630
+631
+632
+633
+634
+635
+636
+637
+638
+639
+640
+641
+642
+643
+644
+645
+646
+647
+648
+649
+650
+651
+652
+653
+654
+655
+656
+657
+658
+659
+660
+661
+662
+663
+664
+665
+666
+667
+668
+669
+670
+671
+672
+673
+674
+675
+676
+677
+678
+679
+680
+681
+682
+683
+684
+685
+686
+687
+688
+689
+690
+691
+692
+693
+694
+695
+696
+697
+698
+699
+700
+701
+702
+703
+704
+705
+706
+707
+708
+709
+710
+711
+712
+713
+714
+715
+716
+717
+718
+719
+720
+721
+722
+723
+724
+725
+726
+727
+728
+729
+730
+731
+732
+733
+734
+735
+736
+737
+738
+739
+740
+741
+742
+743
+744
+745
+746
+747
+748
+749
+750
+751
+752
+753
+754
+755
+756
+757
+758
+759
+760
+761
+762
+763
+764
+765
+766
+767
+768
+769
+770
+771
+772
+773
+774
+775
+776
+777
+778
+779
+780
+781
+782
+783
+784
+785
+786
+787
+788
+789
+790
+791
+792
+793
+794
+795
@dataclass
+class Dataset(metaclass=CamelCaseMeta):
+    """Fusion Dataset class for managing dataset metadata in a Fusion catalog.
+
+    Attributes:
+        identifier (str): A unique identifier for the dataset.
+        title (str, optional): A title for the dataset. If not provided, defaults to identifier.
+        category (str | list[str] | None, optional): A category or list of categories for the dataset. Defaults to None.
+        description (str, optional): A description of the dataset. If not provided, defaults to identifier.
+        frequency (str, optional): The frequency of the dataset. Defaults to "Once".
+        is_internal_only_dataset (bool, optional): Flag for internal datasets. Defaults to False.
+        is_third_party_data (bool, optional): Flag for third party data. Defaults to True.
+        is_restricted (bool | None, optional): Flag for restricted datasets. Defaults to None.
+        is_raw_data (bool, optional): Flag for raw datasets. Defaults to True.
+        maintainer (str | None, optional): Dataset maintainer. Defaults to "J.P. Morgan Fusion".
+        source (str | list[str] | None, optional): Name of data vendor which provided the data. Defaults to None.
+        region (str | list[str] | None, optional): Region. Defaults to None.
+        publisher (str, optional): Name of vendor that publishes the data. Defaults to "J.P. Morgan".
+        product (str | list[str] | None, optional): Product to associate dataset with. Defaults to None.
+        sub_category (str | list[str] | None, optional): Sub-category. Defaults to None.
+        tags (str | list[str] | None, optional): Tags used for search purposes. Defaults to None.
+        created_date (str | None, optional): Created date. Defaults to None.
+        modified_date (str | None, optional): Modified date. Defaults to None.
+        delivery_channel (str | list[str], optional): Delivery channel. Defaults to "API".
+        language (str, optional): Language. Defaults to "English".
+        status (str, optional): Status. Defaults to "Available".
+        type_ (str | None, optional): Dataset type. Defaults to "Source".
+        container_type (str | None, optional): Container type. Defaults to "Snapshot-Full".
+        snowflake (str | None, optional): Snowflake account connection. Defaults to None.
+        complexity (str | None, optional): Complexity. Defaults to None.
+        is_immutable (bool | None, optional): Flag for immutable datasets. Defaults to None.
+        is_mnpi (bool | None, optional): is_mnpi. Defaults to None.
+        is_pci (bool | None, optional): is_pci. Defaults to None.
+        is_pii (bool | None, optional): is_pii. Defaults to None.
+        is_client (bool | None, optional): is_client. Defaults to None.
+        is_public (bool | None, optional): is_public. Defaults to None.
+        is_internal (bool | None, optional): is_internal. Defaults to None.
+        is_confidential (bool | None, optional): is_confidential. Defaults to None.
+        is_highly_confidential (bool | None, optional): is_highly_confidential. Defaults to None.
+        is_active (bool | None, optional): is_active. Defaults to None.
+        owners (list[str] | None, optional): The owners of the dataset. Defaults to None.
+        application_id (str | dict[str, str] | None, optional): The application (most commonly seal ID) that the 
+            dataset/report/flow is owned by. Accepts string format for seal IDs, or a dictionary containing 'id' and
+            'type' as keys. Defaults to None.
+        _client (Any, optional): A Fusion client object. Defaults to None.
+
+    """
+
+    identifier: str
+    title: str = ""
+    category: str | list[str] | None = None
+    description: str = ""
+    frequency: str = "Once"
+    is_internal_only_dataset: bool = False
+    is_third_party_data: bool = True
+    is_restricted: bool | None = None
+    is_raw_data: bool = True
+    maintainer: str | None = "J.P. Morgan Fusion"
+    source: str | list[str] | None = None
+    region: str | list[str] | None = None
+    publisher: str = "J.P. Morgan"
+    product: str | list[str] | None = None
+    sub_category: str | list[str] | None = None
+    tags: str | list[str] | None = None
+    created_date: str | None = None
+    modified_date: str | None = None
+    delivery_channel: str | list[str] = field(default_factory=lambda: ["API"])
+    language: str = "English"
+    status: str = "Available"
+    type_: str | None = "Source"
+    container_type: str | None = "Snapshot-Full"
+    snowflake: str | None = None
+    complexity: str | None = None
+    is_immutable: bool | None = None
+    is_mnpi: bool | None = None
+    is_pci: bool | None = None
+    is_pii: bool | None = None
+    is_client: bool | None = None
+    is_public: bool | None = None
+    is_internal: bool | None = None
+    is_confidential: bool | None = None
+    is_highly_confidential: bool | None = None
+    is_active: bool | None = None
+    owners: list[str] | None = None
+    application_id: str | dict[str, str] | None = None
+
+    _client: Fusion | None = field(init=False, repr=False, compare=False, default=None)
+
+    def __repr__(self: Dataset) -> str:
+        """Return an object representation of the Dataset object.
+
+        Returns:
+            str: Object representation of the dataset.
+
+        """
+        attrs = {k: v for k, v in self.__dict__.items() if not k.startswith("_")}
+        return f"Dataset(\n" + ",\n ".join(f"{k}={v!r}" for k, v in attrs.items()) + "\n)"
+
+    def __post_init__(self: Dataset) -> None:
+        """Format Dataset metadata fields after object initialization."""
+        self.identifier = tidy_string(self.identifier).upper().replace(" ", "_")
+        self.title = tidy_string(self.title) if self.title != "" else self.identifier.replace("_", " ").title()
+        self.description = tidy_string(self.description) if self.description != "" else self.title
+        self.category = (
+            self.category if isinstance(self.category, list) or self.category is None else make_list(self.category)
+        )
+        self.delivery_channel = (
+            self.delivery_channel if isinstance(self.delivery_channel, list) else make_list(self.delivery_channel)
+        )
+        self.source = self.source if isinstance(self.source, list) or self.source is None else make_list(self.source)
+        self.region = self.region if isinstance(self.region, list) or self.region is None else make_list(self.region)
+        self.product = (
+            self.product if isinstance(self.product, list) or self.product is None else make_list(self.product)
+        )
+        self.sub_category = (
+            self.sub_category
+            if isinstance(self.sub_category, list) or self.sub_category is None
+            else make_list(self.sub_category)
+        )
+        self.tags = self.tags if isinstance(self.tags, list) or self.tags is None else make_list(self.tags)
+        self.is_internal_only_dataset = (
+            self.is_internal_only_dataset
+            if isinstance(self.is_internal_only_dataset, bool)
+            else make_bool(self.is_internal_only_dataset)
+        )
+        self.created_date = convert_date_format(self.created_date) if self.created_date else None
+        self.modified_date = convert_date_format(self.modified_date) if self.modified_date else None
+        self.owners = self.owners if isinstance(self.owners, list) or self.owners is None else make_list(self.owners)
+        self.application_id = (
+            {"id": str(self.application_id), "type": "Application (SEAL)"}
+            if isinstance(self.application_id, str)
+            else self.application_id
+        )
+
+    def __getattr__(self, name: str) -> Any:
+        # Redirect attribute access to the snake_case version
+        snake_name = camel_to_snake(name)
+        if snake_name in self.__dict__:
+            return self.__dict__[snake_name]
+        raise AttributeError(f"'{type(self).__name__}' object has no attribute '{name}'")
+
+    def __setattr__(self, name: str, value: Any) -> None:
+        if name == "client":
+            # Use the property setter for client
+            object.__setattr__(self, name, value)
+        else:
+            snake_name = camel_to_snake(name)
+            self.__dict__[snake_name] = value
+
+    @property
+    def client(self) -> Fusion | None:
+        """Return the client."""
+        return self._client
+
+    @client.setter
+    def client(self, client: Fusion | None) -> None:
+        """Set the client for the Dataset. Set automatically, if the Dataset is instantiated from a Fusion object.
+
+        Args:
+            client (Any): Fusion client object.
+
+        Examples:
+            >>> from fusion import Fusion
+            >>> fusion = Fusion()
+            >>> dataset = fusion.dataset("my_dataset")
+            >>> dataset.client = fusion
+
+        """
+        self._client = client
+
+    def _use_client(self, client: Fusion | None) -> Fusion:
+        """Determine client."""
+
+        res = self._client if client is None else client
+        if res is None:
+            raise ValueError("A Fusion client object is required.")
+        return res
+
+    @classmethod
+    def _from_series(cls: type[Dataset], series: pd.Series[Any]) -> Dataset:
+        """Instantiate a Dataset object from a pandas Series.
+
+        Args:
+            series (pd.Series[Any]): Dataset metadata as a pandas Series.
+
+        Returns:
+            Dataset: Dataset object.
+
+        """
+        series = series.rename(lambda x: x.replace(" ", "").replace("_", "").lower())
+        series = series.rename({"tag": "tags"})
+        series = series.rename({"type_": "type"})
+        series = series.rename({"productId": "product"})
+
+        is_internal_only_dataset = series.get("isinternalonlydataset", None)
+        is_internal_only_dataset = (
+            make_bool(is_internal_only_dataset) if is_internal_only_dataset is not None else is_internal_only_dataset
+        )
+        is_restricted = series.get("isrestricted", None)
+        is_restricted = make_bool(is_restricted) if is_restricted is not None else is_restricted
+        is_immutable = series.get("isimmutable", None)
+        is_immutable = make_bool(is_immutable) if is_immutable is not None else is_immutable
+        is_mnpi = series.get("ismnpi", None)
+        is_mnpi = make_bool(is_mnpi) if is_mnpi is not None else is_mnpi
+        is_pci = series.get("ispci", None)
+        is_pci = make_bool(is_pci) if is_pci is not None else is_pci
+        is_pii = series.get("ispii", None)
+        is_pii = make_bool(is_pii) if is_pii is not None else is_pii
+        is_client = series.get("isclient", None)
+        is_client = make_bool(is_client) if is_client is not None else is_client
+        is_public = series.get("ispublic", None)
+        is_public = make_bool(is_public) if is_public is not None else is_public
+        is_internal = series.get("isinternal", None)
+        is_internal = make_bool(is_internal) if is_internal is not None else is_internal
+        is_confidential = series.get("isconfidential", None)
+        is_confidential = make_bool(is_confidential) if is_confidential is not None else is_confidential
+        is_highly_confidential = series.get("ishighlyconfidential", None)
+        is_highly_confidential = (
+            make_bool(is_highly_confidential) if is_highly_confidential is not None else is_highly_confidential
+        )
+        is_active = series.get("isactive", None)
+        is_active = make_bool(is_active) if is_active is not None else is_active
+
+        dataset = cls(
+            identifier=series.get("identifier", ""),
+            category=series.get("category", None),
+            delivery_channel=series.get("deliverychannel", ["API"]),
+            title=series.get("title", ""),
+            description=series.get("description", ""),
+            frequency=series.get("frequency", "Once"),
+            is_internal_only_dataset=is_internal_only_dataset,  # type: ignore
+            is_third_party_data=series.get("isthirdpartydata", True),
+            is_restricted=is_restricted,
+            is_raw_data=series.get("israwdata", True),
+            maintainer=series.get("maintainer", "J.P. Morgan Fusion"),
+            source=series.get("source", None),
+            region=series.get("region", None),
+            publisher=series.get("publisher", "J.P. Morgan"),
+            product=series.get("product", None),
+            sub_category=series.get("subcategory", None),
+            tags=series.get("tags", None),
+            container_type=series.get("containertype", "Snapshot-Full"),
+            language=series.get("language", "English"),
+            status=series.get("status", "Available"),
+            type_=series.get("type", "Source"),
+            created_date=series.get("createddate", None),
+            modified_date=series.get("modifieddate", None),
+            snowflake=series.get("snowflake", None),
+            complexity=series.get("complexity", None),
+            owners=series.get("owners", None),
+            application_id=series.get("applicationid", None),
+            is_immutable=is_immutable,
+            is_mnpi=is_mnpi,
+            is_pci=is_pci,
+            is_pii=is_pii,
+            is_client=is_client,
+            is_public=is_public,
+            is_internal=is_internal,
+            is_confidential=is_confidential,
+            is_highly_confidential=is_highly_confidential,
+            is_active=is_active,
+        )
+        return dataset
+
+    @classmethod
+    def _from_dict(cls: type[Dataset], data: dict[str, Any]) -> Dataset:
+        """Instantiate a Dataset object from a dictionary.
+
+        Args:
+            data (dict[str, Any]): Dataset metadata as a dictionary.
+
+        Returns:
+            Dataset: Dataset object.
+
+        """
+        keys = [f.name for f in fields(cls)]
+        keys = ["type" if key == "type_" else key for key in keys]
+        data = {camel_to_snake(k): v for k, v in data.items()}
+        data = {k: v for k, v in data.items() if k in keys}
+        if "type" in data:
+            data["type_"] = data.pop("type")
+        return cls(**data)
+
+    @classmethod
+    def _from_csv(cls: type[Dataset], file_path: str, identifier: str | None = None) -> Dataset:
+        """Instantiate a Dataset object from a CSV file.
+
+        Args:
+            file_path (str): Path to the CSV file.
+            identifier (str | None, optional): Dataset identifer for filtering if multipler datasets are defined in csv.
+                Defaults to None.
+
+        Returns:
+            Dataset: Dataset object.
+
+        """
+        data = pd.read_csv(file_path)
+
+        return (
+            cls._from_series(data[data["identifier"] == identifier].reset_index(drop=True).iloc[0])
+            if identifier
+            else cls._from_series(data.reset_index(drop=True).iloc[0])
+        )
+
+    def from_object(
+        self,
+        dataset_source: Dataset | dict[str, Any] | str | pd.Series[Any],
+    ) -> Dataset:
+        """Instantiate a Dataset object from a Dataset object, dictionary, JSON string, path to CSV, or pandas Series.
+
+        Args:
+            dataset_source (Dataset | dict[str, Any] | str | pd.Series[Any]): Dataset metadata source.
+
+        Raises:
+            TypeError: If the object provided is not a Dataset, dictionary, JSON string, path to CSV file,
+                or pandas Series.
+
+        Returns:
+            Dataset: Dataset object.
+
+        Examples:
+            Instantiate a Dataset object from a dictionary:
+
+            >>> from fusion import Fusion
+            >>> fusion = Fusion()
+            >>> dataset_dict = {
+            ...     "identifier": "my_dataset",
+            ...     "title": "My Dataset",
+            ...     "description": "My dataset description",
+            ...     "category": "Finance",
+            ...     "frequency": "Daily",
+            ...     "is_restricted": False,
+            ...     "is_raw_data": True,
+            ...     "maintainer": "J.P. Morgan Fusion",
+            ...     "source": "J.P. Morgan",
+            ...     }
+            >>> dataset = fusion.dataset("my_dataset").from_object(dataset_dict)
+
+            Instantiate a Dataset object from a JSON string:
+
+            >>> from fusion import Fusion
+            >>> fusion = Fusion()
+            >>> dataset_json = '{
+            ...     "identifier": "my_dataset",
+            ...     "title": "My Dataset",
+            ...     "description": "My dataset description",
+            ...     "category": "Finance",
+            ...     "frequency": "Daily",
+            ...     "is_restricted": False,
+            ...     "is_raw_data": True,
+            ...     "maintainer": "J.P. Morgan Fusion",
+            ...     "source": "J.P. Morgan"
+            ...     }'
+            >>> dataset = fusion.dataset("my_dataset").from_object(dataset_json)
+
+            Instantiate a Dataset object from a CSV file:
+
+            >>> from fusion import Fusion
+            >>> fusion = Fusion()
+            >>> dataset = fusion.dataset("my_dataset").from_object("path/to/dataset.csv")
+
+            Instantiate a Dataset object from a pandas Series:
+
+            >>> from fusion import Fusion
+            >>> fusion = Fusion()
+            >>> dataset_series = pd.Series({
+            ...     "identifier": "my_dataset",
+            ...     "title": "My Dataset",
+            ...     "description": "My dataset description",
+            ...     "category": "Finance",
+            ...     "frequency": "Daily",
+            ...     "is_restricted": False,
+            ...     "is_raw_data": True,
+            ...     "maintainer": "J.P. Morgan Fusion",
+            ...     "source": "J.P. Morgan"
+            ...     })
+            >>> dataset = fusion.dataset("my_dataset").from_object(dataset_series)
+
+        """
+        if isinstance(dataset_source, Dataset):
+            dataset = dataset_source
+        elif isinstance(dataset_source, dict):
+            dataset = self._from_dict(dataset_source)
+        elif isinstance(dataset_source, str):
+            if _is_json(dataset_source):
+                dataset = self._from_dict(js.loads(dataset_source))
+            else:
+                dataset = self._from_csv(dataset_source)
+        elif isinstance(dataset_source, pd.Series):
+            dataset = self._from_series(dataset_source)
+        else:
+            raise TypeError(f"Could not resolve the object provided: {dataset_source}")
+
+        dataset.client = self._client
+
+        return dataset
+
+    def from_catalog(self, catalog: str | None = None, client: Fusion | None = None) -> Dataset:
+        """Instantiate a Dataset object from a Fusion catalog.
+
+        Args:
+            catalog (str | None, optional): Catalog identifer. Defaults to None.
+            client (Fusion | None, optional): Fusion session. Defaults to None.
+                If instantiated from a Fusion object, then the client is set automatically.
+
+        Returns:
+            Dataset: Dataset object.
+
+        Examples:
+
+            >>> from fusion import Fusion
+            >>> fusion = Fusion()
+            >>> dataset = fusion.dataset("my_dataset").from_catalog(catalog="my_catalog")
+
+        """
+        client = self._use_client(client)
+        catalog = client._use_catalog(catalog)
+        dataset = self.identifier
+        resp = client.session.get(f"{client.root_url}catalogs/{catalog}/datasets")
+        requests_raise_for_status(resp)
+        list_datasets = resp.json()["resources"]
+        dict_ = [dict_ for dict_ in list_datasets if dict_["identifier"] == dataset][0]
+        dataset_obj = self._from_dict(dict_)
+        dataset_obj.client = client
+
+        prod_df = client.list_product_dataset_mapping(catalog=catalog)
+
+        if dataset.lower() in list(prod_df.dataset.str.lower()):
+            product = [prod_df[prod_df["dataset"].str.lower() == dataset.lower()]["product"].iloc[0]]
+            dataset_obj.product = product
+
+        return dataset_obj
+
+    def to_dict(self) -> dict[str, Any]:
+        """Convert the Dataset instance to a dictionary.
+
+        Returns:
+            dict[str, Any]: Dataset metadata as a dictionary.
+
+        Examples:
+            >>> from fusion import Fusion
+            >>> fusion = Fusion()
+            >>> dataset = fusion.dataset("my_dataset")
+            >>> dataset_dict = dataset.to_dict()
+
+        """
+        dataset_dict = {snake_to_camel(k): v for k, v in self.__dict__.items() if not k.startswith("_")}
+
+        return dataset_dict
+
+    def create(
+        self,
+        catalog: str | None = None,
+        product: str | None = None,
+        client: Fusion | None = None,
+        return_resp_obj: bool = False,
+    ) -> requests.Response | None:
+        """Upload a new dataset to a Fusion catalog.
+
+        Args:
+            catalog (str | None, optional): A catalog identifier. Defaults to "common".
+            product (str | None, optional): A product identifier to upload dataset to. If dataset object already has
+                product attribute populated, the attribute will be overwritten by this value. Defaults to None.
+            client (Fusion, optional): A Fusion client object. Defaults to the instance's _client.
+                If instantiated from a Fusion object, then the client is set automatically.
+            return_resp_obj (bool, optional): If True then return the response object. Defaults to False.
+
+        Returns:
+            requests.Response | None: The response object from the API call if return_resp_obj is True, otherwise None.
+
+        Examples:
+
+            From scratch:
+
+            >>> from fusion import Fusion
+            >>> fusion = Fusion()
+            >>> dataset = fusion.dataset(
+            ...     identifier= "my_dataset",
+            ...     title= "My Dataset",
+            ...     description= "My dataset description",
+            ...     category= "Finance",
+            ...     frequency= "Daily",
+            ...     is_restricted= False
+            ...     )
+            >>> dataset.create(catalog="my_catalog")
+
+            From a dictionary:
+
+            >>> from fusion import Fusion
+            >>> fusion = Fusion()
+            >>> dataset_dict = {
+            ...     "identifier": "my_dataset",
+            ...     "title": "My Dataset",
+            ...     "description": "My dataset description",
+            ...     "category": "Finance",
+            ...     "frequency": "Daily",
+            ...     "is_restricted": False
+            ...     }
+            >>> dataset = fusion.dataset("my_dataset").from_object(dataset_dict)
+            >>> dataset.create(catalog="my_catalog")
+
+            From a JSON string:
+
+            >>> from fusion import Fusion
+            >>> fusion = Fusion()
+            >>> dataset_json = '{
+            ...     "identifier": "my_dataset",
+            ...     "title": "My Dataset",
+            ...     "description": "My dataset description",
+            ...     "category": "Finance",
+            ...     "frequency": "Daily",
+            ...     "is_restricted": False
+            ...     }'
+            >>> dataset = fusion.dataset("my_dataset").from_object(dataset_json)
+            >>> dataset.create(catalog="my_catalog")
+
+            From a CSV file:
+
+            >>> from fusion import Fusion
+            >>> fusion = Fusion()
+            >>> dataset = fusion.dataset("my_dataset").from_object("path/to/dataset.csv")
+            >>> dataset.create(catalog="my_catalog")
+
+            From a pandas Series:
+
+            >>> from fusion import Fusion
+            >>> fusion = Fusion()
+            >>> dataset_series = pd.Series({
+            ...     "identifier": "my_dataset",
+            ...     "title": "My Dataset",
+            ...     "description": "My dataset description",
+            ...     "category": "Finance",
+            ...     "frequency": "Daily",
+            ...     "is_restricted": False
+            ...     })
+            >>> dataset = fusion.dataset("my_dataset").from_object(dataset_series)
+
+            From existing dataset in a catalog:
+
+            >>> from fusion import Fusion
+            >>> fusion = Fusion()
+            >>> dataset = fusion.dataset("my_dataset").from_catalog(catalog="my_catalog")
+            >>> dataset.identifier = "my_new_dataset"
+            >>> dataset.create(catalog="my_catalog")
+
+        """
+        client = self._use_client(client)
+        catalog = client._use_catalog(catalog)
+
+        self.created_date = self.created_date if self.created_date else pd.Timestamp("today").strftime("%Y-%m-%d")
+        self.modified_date = self.modified_date if self.modified_date else pd.Timestamp("today").strftime("%Y-%m-%d")
+
+        self.product = [product] if product else self.product
+
+        data = self.to_dict()
+
+        if data.get("report", None) and data["report"]["tier"] == "":
+            raise ValueError("Tier cannot be blank for reports.")
+
+
+        url = f"{client.root_url}catalogs/{catalog}/datasets/{self.identifier}"
+        resp: requests.Response = client.session.post(url, json=data)
+        requests_raise_for_status(resp)
+
+        return resp if return_resp_obj else None
+
+    def update(
+        self,
+        catalog: str | None = None,
+        client: Fusion | None = None,
+        return_resp_obj: bool = False,
+    ) -> requests.Response | None:
+        """Updates a dataset via API from dataset object.
+
+        Args:
+            catalog (str | None, optional): A catalog identifier. Defaults to "common".
+            client (Fusion, optional): A Fusion client object. Defaults to the instance's _client.
+                If instantiated from a Fusion object, then the client is set automatically.
+            return_resp_obj (bool, optional): If True then return the response object. Defaults to False.
+
+        Returns:
+            requests.Response | None: The response object from the API call if return_resp_obj is True, otherwise None.
+
+        Examples:
+
+            >>> from fusion import Fusion
+            >>> fusion = Fusion()
+            >>> dataset = fusion.dataset("my_dataset").from_catalog(catalog="my_catalog")
+            >>> dataset.title = "My Updated Dataset"
+            >>> dataset.update(catalog="my_catalog")
+
+        """
+        client = self._use_client(client)
+        catalog = client._use_catalog(catalog)
+
+        self.created_date = self.created_date if self.created_date else pd.Timestamp("today").strftime("%Y-%m-%d")
+        self.modified_date = self.modified_date if self.modified_date else pd.Timestamp("today").strftime("%Y-%m-%d")
+
+        data = self.to_dict()
+
+        url = f"{client.root_url}catalogs/{catalog}/datasets/{self.identifier}"
+        resp: requests.Response = client.session.put(url, json=data)
+        requests_raise_for_status(resp)
+        return resp if return_resp_obj else None
+
+    def delete(
+        self,
+        catalog: str | None = None,
+        client: Fusion | None = None,
+        return_resp_obj: bool = False,
+    ) -> requests.Response | None:
+        """Delete a dataset via API from its dataset identifier.
+
+        Args:
+            catalog (str | None, optional): A catalog identifier. Defaults to "common".
+            client (Fusion, optional): A Fusion client object. Defaults to the instance's _client.
+                If instantiated from a Fusion object, then the client is set automatically.
+            return_resp_obj (bool, optional): If True then return the response object. Defaults to False.
+
+        Returns:
+            requests.Response | None: The response object from the API call if return_resp_obj is True, otherwise None.
+
+        Examples:
+
+            >>> from fusion import Fusion
+            >>> fusion = Fusion()
+            >>> fusion.dataset("my_dataset").delete(catalog="my_catalog")
+
+        """
+        client = self._use_client(client)
+        catalog = client._use_catalog(catalog)
+
+        url = f"{client.root_url}catalogs/{catalog}/datasets/{self.identifier}"
+        resp: requests.Response = client.session.delete(url)
+        requests_raise_for_status(resp)
+        return resp if return_resp_obj else None
+
+    def copy(
+        self,
+        catalog_to: str,
+        catalog_from: str | None = None,
+        client: Fusion | None = None,
+        client_to: Fusion | None = None,
+        return_resp_obj: bool = False,
+    ) -> requests.Response | None:
+        """Copy dataset from one catalog and/or environment to another by copy.
+
+        Args:
+            catalog_to (str): A catalog identifier to which to copy dataset.
+            catalog_from (str, optional): A catalog identifier from which to copy dataset. Defaults to "common".
+            client (Fusion, optional): A Fusion client object. Defaults to the instance's _client.
+                If instantiated from a Fusion object, then the client is set automatically.
+            client_to (Fusion | None, optional): Fusion client object. Defaults to current instance.
+            return_resp_obj (bool, optional): If True then return the response object. Defaults to False.
+
+        Returns:
+            requests.Response | None: The response object from the API call if return_resp_obj is True, otherwise None.
+
+        Examples:
+
+            >>> from fusion import Fusion
+            >>> fusion = Fusion()
+            >>> dataset = fusion.dataset("my_dataset").copy(catalog_from="my_catalog", catalog_to="my_new_catalog")
+
+        """
+        client = self._use_client(client)
+        catalog_from = client._use_catalog(catalog_from)
+
+        if client_to is None:
+            client_to = client
+        dataset_obj = self.from_catalog(catalog=catalog_from, client=client)
+        dataset_obj.client = client_to
+        resp = dataset_obj.create(client=client_to, catalog=catalog_to, return_resp_obj=True)
+        return resp if return_resp_obj else None
+
+    def activate(
+        self,
+        catalog: str | None = None,
+        client: Fusion | None = None,
+        return_resp_obj: bool = False,
+    ) -> requests.Response | None:
+        """Activate a dataset by setting the isActive flag to True.
+
+        Args:
+            catalog (str | None, optional): A catalog identifier. Defaults to "common".
+            client (Fusion | None, optional):  A Fusion client object. Defaults to the instance's _client.
+                If instantiated from a Fusion object, then the client is set automatically.
+            return_resp_obj (bool, optional): If True then return the response object. Defaults to False.
+
+        Examples:
+
+            >>> from fusion import Fusion
+            >>> fusion = Fusion()
+            >>> fusion.dataset("my_dataset").activate(catalog="my_catalog")
+
+        """
+        client = self._use_client(client)
+        catalog = client._use_catalog(catalog)
+        dataset_obj = self.from_catalog(catalog=catalog, client=client)
+        dataset_obj.is_active = True
+        resp = dataset_obj.update(catalog=catalog, client=client, return_resp_obj=return_resp_obj)
+
+        return resp if return_resp_obj else None
+
+    def add_to_product(
+        self,
+        product: str,
+        catalog: str | None = None,
+        client: Fusion | None = None,
+        return_resp_obj: bool = False,
+    ) -> requests.Response | None:
+        """Map dataset to a product.
+
+        Args:
+            product (str): A product identifier.
+            catalog (str | None, optional): A catalog identifier. Defaults to "common".
+            client (Fusion | None, optional):  A Fusion client object. Defaults to the instance's _client.
+                If instantiated from a Fusion object, then the client is set automatically.
+
+        Examples:
+
+            >>> from fusion import Fusion
+            >>> fusion = Fusion()
+            >>> fusion.dataset("my_dataset").add_to_product(product="MY_PRODUCT", catalog="my_catalog")
+
+        """
+        client = self._use_client(client)
+        catalog = client._use_catalog(catalog)
+        url = f"{client.root_url}catalogs/{catalog}/productDatasets"
+        data = {"product": product, "datasets": [self.identifier]}
+        resp = client.session.put(url=url, json=data)
+
+        requests_raise_for_status(resp)
+
+        return resp if return_resp_obj else None
+
+    def remove_from_product(
+        self,
+        product: str,
+        catalog: str | None = None,
+        client: Fusion | None = None,
+        return_resp_obj: bool = False,
+    ) -> requests.Response | None:
+        """Delete dataset to product mapping.
+
+        Args:
+            product (str): A product identifier.
+            catalog (str | None, optional): A catalog identifier. Defaults to "common".
+            client (Fusion | None, optional):  A Fusion client object. Defaults to the instance's _client.
+                If instantiated from a Fusion object, then the client is set automatically.
+
+        Examples:
+
+            >>> from fusion import Fusion
+            >>> fusion = Fusion()
+            >>> fusion.dataset("my_dataset").remove_from_product(product="MY_PRODUCT", catalog="my_catalog")
+
+        """
+        client = self._use_client(client)
+        catalog = client._use_catalog(catalog)
+        dataset = self.identifier
+        url = f"{client.root_url}catalogs/{catalog}/productDatasets/{product}/{dataset}"
+        resp = client.session.delete(url=url)
+
+        requests_raise_for_status(resp)
+
+        return resp if return_resp_obj else None
+
+
+ + + +
+ + + + + + + +
+ + + +

+ client: Fusion | None + + + property + writable + + +

+ + +
+ +

Return the client.

+
+ +
+ + + +
+ + +

+ __post_init__() + +

+ + +
+ +

Format Dataset metadata fields after object initialization.

+ +
+ Source code in py_src/fusion/dataset.py +
127
+128
+129
+130
+131
+132
+133
+134
+135
+136
+137
+138
+139
+140
+141
+142
+143
+144
+145
+146
+147
+148
+149
+150
+151
+152
+153
+154
+155
+156
+157
+158
+159
+160
+161
def __post_init__(self: Dataset) -> None:
+    """Format Dataset metadata fields after object initialization."""
+    self.identifier = tidy_string(self.identifier).upper().replace(" ", "_")
+    self.title = tidy_string(self.title) if self.title != "" else self.identifier.replace("_", " ").title()
+    self.description = tidy_string(self.description) if self.description != "" else self.title
+    self.category = (
+        self.category if isinstance(self.category, list) or self.category is None else make_list(self.category)
+    )
+    self.delivery_channel = (
+        self.delivery_channel if isinstance(self.delivery_channel, list) else make_list(self.delivery_channel)
+    )
+    self.source = self.source if isinstance(self.source, list) or self.source is None else make_list(self.source)
+    self.region = self.region if isinstance(self.region, list) or self.region is None else make_list(self.region)
+    self.product = (
+        self.product if isinstance(self.product, list) or self.product is None else make_list(self.product)
+    )
+    self.sub_category = (
+        self.sub_category
+        if isinstance(self.sub_category, list) or self.sub_category is None
+        else make_list(self.sub_category)
+    )
+    self.tags = self.tags if isinstance(self.tags, list) or self.tags is None else make_list(self.tags)
+    self.is_internal_only_dataset = (
+        self.is_internal_only_dataset
+        if isinstance(self.is_internal_only_dataset, bool)
+        else make_bool(self.is_internal_only_dataset)
+    )
+    self.created_date = convert_date_format(self.created_date) if self.created_date else None
+    self.modified_date = convert_date_format(self.modified_date) if self.modified_date else None
+    self.owners = self.owners if isinstance(self.owners, list) or self.owners is None else make_list(self.owners)
+    self.application_id = (
+        {"id": str(self.application_id), "type": "Application (SEAL)"}
+        if isinstance(self.application_id, str)
+        else self.application_id
+    )
+
+
+
+ +
+ +
+ + +

+ __repr__() + +

+ + +
+ +

Return an object representation of the Dataset object.

+ + +

Returns:

+ + + + + + + + + + + + + +
Name TypeDescription
str + str + +
+

Object representation of the dataset.

+
+
+ +
+ Source code in py_src/fusion/dataset.py +
117
+118
+119
+120
+121
+122
+123
+124
+125
def __repr__(self: Dataset) -> str:
+    """Return an object representation of the Dataset object.
+
+    Returns:
+        str: Object representation of the dataset.
+
+    """
+    attrs = {k: v for k, v in self.__dict__.items() if not k.startswith("_")}
+    return f"Dataset(\n" + ",\n ".join(f"{k}={v!r}" for k, v in attrs.items()) + "\n)"
+
+
+
+ +
+ +
+ + +

+ activate(catalog=None, client=None, return_resp_obj=False) + +

+ + +
+ +

Activate a dataset by setting the isActive flag to True.

+ + +

Parameters:

+ + + + + + + + + + + + + + + + + + + + + + + + + + + + + +
NameTypeDescriptionDefault
catalog + str | None + +
+

A catalog identifier. Defaults to "common".

+
+
+ None +
client + Fusion | None + +
+

A Fusion client object. Defaults to the instance's _client. +If instantiated from a Fusion object, then the client is set automatically.

+
+
+ None +
return_resp_obj + bool + +
+

If True then return the response object. Defaults to False.

+
+
+ False +
+
>>> from fusion import Fusion
+>>> fusion = Fusion()
+>>> fusion.dataset("my_dataset").activate(catalog="my_catalog")
+
+ +
+ Source code in py_src/fusion/dataset.py +
704
+705
+706
+707
+708
+709
+710
+711
+712
+713
+714
+715
+716
+717
+718
+719
+720
+721
+722
+723
+724
+725
+726
+727
+728
+729
+730
+731
def activate(
+    self,
+    catalog: str | None = None,
+    client: Fusion | None = None,
+    return_resp_obj: bool = False,
+) -> requests.Response | None:
+    """Activate a dataset by setting the isActive flag to True.
+
+    Args:
+        catalog (str | None, optional): A catalog identifier. Defaults to "common".
+        client (Fusion | None, optional):  A Fusion client object. Defaults to the instance's _client.
+            If instantiated from a Fusion object, then the client is set automatically.
+        return_resp_obj (bool, optional): If True then return the response object. Defaults to False.
+
+    Examples:
+
+        >>> from fusion import Fusion
+        >>> fusion = Fusion()
+        >>> fusion.dataset("my_dataset").activate(catalog="my_catalog")
+
+    """
+    client = self._use_client(client)
+    catalog = client._use_catalog(catalog)
+    dataset_obj = self.from_catalog(catalog=catalog, client=client)
+    dataset_obj.is_active = True
+    resp = dataset_obj.update(catalog=catalog, client=client, return_resp_obj=return_resp_obj)
+
+    return resp if return_resp_obj else None
+
+
+
+ +
+ +
+ + +

+ add_to_product(product, catalog=None, client=None, return_resp_obj=False) + +

+ + +
+ +

Map dataset to a product.

+ + +

Parameters:

+ + + + + + + + + + + + + + + + + + + + + + + + + + + + + +
NameTypeDescriptionDefault
product + str + +
+

A product identifier.

+
+
+ required +
catalog + str | None + +
+

A catalog identifier. Defaults to "common".

+
+
+ None +
client + Fusion | None + +
+

A Fusion client object. Defaults to the instance's _client. +If instantiated from a Fusion object, then the client is set automatically.

+
+
+ None +
+
>>> from fusion import Fusion
+>>> fusion = Fusion()
+>>> fusion.dataset("my_dataset").add_to_product(product="MY_PRODUCT", catalog="my_catalog")
+
+ +
+ Source code in py_src/fusion/dataset.py +
733
+734
+735
+736
+737
+738
+739
+740
+741
+742
+743
+744
+745
+746
+747
+748
+749
+750
+751
+752
+753
+754
+755
+756
+757
+758
+759
+760
+761
+762
+763
def add_to_product(
+    self,
+    product: str,
+    catalog: str | None = None,
+    client: Fusion | None = None,
+    return_resp_obj: bool = False,
+) -> requests.Response | None:
+    """Map dataset to a product.
+
+    Args:
+        product (str): A product identifier.
+        catalog (str | None, optional): A catalog identifier. Defaults to "common".
+        client (Fusion | None, optional):  A Fusion client object. Defaults to the instance's _client.
+            If instantiated from a Fusion object, then the client is set automatically.
+
+    Examples:
+
+        >>> from fusion import Fusion
+        >>> fusion = Fusion()
+        >>> fusion.dataset("my_dataset").add_to_product(product="MY_PRODUCT", catalog="my_catalog")
+
+    """
+    client = self._use_client(client)
+    catalog = client._use_catalog(catalog)
+    url = f"{client.root_url}catalogs/{catalog}/productDatasets"
+    data = {"product": product, "datasets": [self.identifier]}
+    resp = client.session.put(url=url, json=data)
+
+    requests_raise_for_status(resp)
+
+    return resp if return_resp_obj else None
+
+
+
+ +
+ +
+ + +

+ copy(catalog_to, catalog_from=None, client=None, client_to=None, return_resp_obj=False) + +

+ + +
+ +

Copy dataset from one catalog and/or environment to another by copy.

+ + +

Parameters:

+ + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + +
NameTypeDescriptionDefault
catalog_to + str + +
+

A catalog identifier to which to copy dataset.

+
+
+ required +
catalog_from + str + +
+

A catalog identifier from which to copy dataset. Defaults to "common".

+
+
+ None +
client + Fusion + +
+

A Fusion client object. Defaults to the instance's _client. +If instantiated from a Fusion object, then the client is set automatically.

+
+
+ None +
client_to + Fusion | None + +
+

Fusion client object. Defaults to current instance.

+
+
+ None +
return_resp_obj + bool + +
+

If True then return the response object. Defaults to False.

+
+
+ False +
+ + +

Returns:

+ + + + + + + + + + + + + +
TypeDescription
+ Response | None + +
+

requests.Response | None: The response object from the API call if return_resp_obj is True, otherwise None.

+
+
+
>>> from fusion import Fusion
+>>> fusion = Fusion()
+>>> dataset = fusion.dataset("my_dataset").copy(catalog_from="my_catalog", catalog_to="my_new_catalog")
+
+ +
+ Source code in py_src/fusion/dataset.py +
666
+667
+668
+669
+670
+671
+672
+673
+674
+675
+676
+677
+678
+679
+680
+681
+682
+683
+684
+685
+686
+687
+688
+689
+690
+691
+692
+693
+694
+695
+696
+697
+698
+699
+700
+701
+702
def copy(
+    self,
+    catalog_to: str,
+    catalog_from: str | None = None,
+    client: Fusion | None = None,
+    client_to: Fusion | None = None,
+    return_resp_obj: bool = False,
+) -> requests.Response | None:
+    """Copy dataset from one catalog and/or environment to another by copy.
+
+    Args:
+        catalog_to (str): A catalog identifier to which to copy dataset.
+        catalog_from (str, optional): A catalog identifier from which to copy dataset. Defaults to "common".
+        client (Fusion, optional): A Fusion client object. Defaults to the instance's _client.
+            If instantiated from a Fusion object, then the client is set automatically.
+        client_to (Fusion | None, optional): Fusion client object. Defaults to current instance.
+        return_resp_obj (bool, optional): If True then return the response object. Defaults to False.
+
+    Returns:
+        requests.Response | None: The response object from the API call if return_resp_obj is True, otherwise None.
+
+    Examples:
+
+        >>> from fusion import Fusion
+        >>> fusion = Fusion()
+        >>> dataset = fusion.dataset("my_dataset").copy(catalog_from="my_catalog", catalog_to="my_new_catalog")
+
+    """
+    client = self._use_client(client)
+    catalog_from = client._use_catalog(catalog_from)
+
+    if client_to is None:
+        client_to = client
+    dataset_obj = self.from_catalog(catalog=catalog_from, client=client)
+    dataset_obj.client = client_to
+    resp = dataset_obj.create(client=client_to, catalog=catalog_to, return_resp_obj=True)
+    return resp if return_resp_obj else None
+
+
+
+ +
+ +
+ + +

+ create(catalog=None, product=None, client=None, return_resp_obj=False) + +

+ + +
+ +

Upload a new dataset to a Fusion catalog.

+ + +

Parameters:

+ + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + +
NameTypeDescriptionDefault
catalog + str | None + +
+

A catalog identifier. Defaults to "common".

+
+
+ None +
product + str | None + +
+

A product identifier to upload dataset to. If dataset object already has +product attribute populated, the attribute will be overwritten by this value. Defaults to None.

+
+
+ None +
client + Fusion + +
+

A Fusion client object. Defaults to the instance's _client. +If instantiated from a Fusion object, then the client is set automatically.

+
+
+ None +
return_resp_obj + bool + +
+

If True then return the response object. Defaults to False.

+
+
+ False +
+ + +

Returns:

+ + + + + + + + + + + + + +
TypeDescription
+ Response | None + +
+

requests.Response | None: The response object from the API call if return_resp_obj is True, otherwise None.

+
+
+
From scratch:
+
+>>> from fusion import Fusion
+>>> fusion = Fusion()
+>>> dataset = fusion.dataset(
+...     identifier= "my_dataset",
+...     title= "My Dataset",
+...     description= "My dataset description",
+...     category= "Finance",
+...     frequency= "Daily",
+...     is_restricted= False
+...     )
+>>> dataset.create(catalog="my_catalog")
+
+From a dictionary:
+
+>>> from fusion import Fusion
+>>> fusion = Fusion()
+>>> dataset_dict = {
+...     "identifier": "my_dataset",
+...     "title": "My Dataset",
+...     "description": "My dataset description",
+...     "category": "Finance",
+...     "frequency": "Daily",
+...     "is_restricted": False
+...     }
+>>> dataset = fusion.dataset("my_dataset").from_object(dataset_dict)
+>>> dataset.create(catalog="my_catalog")
+
+From a JSON string:
+
+>>> from fusion import Fusion
+>>> fusion = Fusion()
+>>> dataset_json = '{
+...     "identifier": "my_dataset",
+...     "title": "My Dataset",
+...     "description": "My dataset description",
+...     "category": "Finance",
+...     "frequency": "Daily",
+...     "is_restricted": False
+...     }'
+>>> dataset = fusion.dataset("my_dataset").from_object(dataset_json)
+>>> dataset.create(catalog="my_catalog")
+
+From a CSV file:
+
+>>> from fusion import Fusion
+>>> fusion = Fusion()
+>>> dataset = fusion.dataset("my_dataset").from_object("path/to/dataset.csv")
+>>> dataset.create(catalog="my_catalog")
+
+From a pandas Series:
+
+>>> from fusion import Fusion
+>>> fusion = Fusion()
+>>> dataset_series = pd.Series({
+...     "identifier": "my_dataset",
+...     "title": "My Dataset",
+...     "description": "My dataset description",
+...     "category": "Finance",
+...     "frequency": "Daily",
+...     "is_restricted": False
+...     })
+>>> dataset = fusion.dataset("my_dataset").from_object(dataset_series)
+
+From existing dataset in a catalog:
+
+>>> from fusion import Fusion
+>>> fusion = Fusion()
+>>> dataset = fusion.dataset("my_dataset").from_catalog(catalog="my_catalog")
+>>> dataset.identifier = "my_new_dataset"
+>>> dataset.create(catalog="my_catalog")
+
+ +
+ Source code in py_src/fusion/dataset.py +
479
+480
+481
+482
+483
+484
+485
+486
+487
+488
+489
+490
+491
+492
+493
+494
+495
+496
+497
+498
+499
+500
+501
+502
+503
+504
+505
+506
+507
+508
+509
+510
+511
+512
+513
+514
+515
+516
+517
+518
+519
+520
+521
+522
+523
+524
+525
+526
+527
+528
+529
+530
+531
+532
+533
+534
+535
+536
+537
+538
+539
+540
+541
+542
+543
+544
+545
+546
+547
+548
+549
+550
+551
+552
+553
+554
+555
+556
+557
+558
+559
+560
+561
+562
+563
+564
+565
+566
+567
+568
+569
+570
+571
+572
+573
+574
+575
+576
+577
+578
+579
+580
+581
+582
+583
+584
+585
+586
+587
+588
+589
+590
+591
+592
+593
def create(
+    self,
+    catalog: str | None = None,
+    product: str | None = None,
+    client: Fusion | None = None,
+    return_resp_obj: bool = False,
+) -> requests.Response | None:
+    """Upload a new dataset to a Fusion catalog.
+
+    Args:
+        catalog (str | None, optional): A catalog identifier. Defaults to "common".
+        product (str | None, optional): A product identifier to upload dataset to. If dataset object already has
+            product attribute populated, the attribute will be overwritten by this value. Defaults to None.
+        client (Fusion, optional): A Fusion client object. Defaults to the instance's _client.
+            If instantiated from a Fusion object, then the client is set automatically.
+        return_resp_obj (bool, optional): If True then return the response object. Defaults to False.
+
+    Returns:
+        requests.Response | None: The response object from the API call if return_resp_obj is True, otherwise None.
+
+    Examples:
+
+        From scratch:
+
+        >>> from fusion import Fusion
+        >>> fusion = Fusion()
+        >>> dataset = fusion.dataset(
+        ...     identifier= "my_dataset",
+        ...     title= "My Dataset",
+        ...     description= "My dataset description",
+        ...     category= "Finance",
+        ...     frequency= "Daily",
+        ...     is_restricted= False
+        ...     )
+        >>> dataset.create(catalog="my_catalog")
+
+        From a dictionary:
+
+        >>> from fusion import Fusion
+        >>> fusion = Fusion()
+        >>> dataset_dict = {
+        ...     "identifier": "my_dataset",
+        ...     "title": "My Dataset",
+        ...     "description": "My dataset description",
+        ...     "category": "Finance",
+        ...     "frequency": "Daily",
+        ...     "is_restricted": False
+        ...     }
+        >>> dataset = fusion.dataset("my_dataset").from_object(dataset_dict)
+        >>> dataset.create(catalog="my_catalog")
+
+        From a JSON string:
+
+        >>> from fusion import Fusion
+        >>> fusion = Fusion()
+        >>> dataset_json = '{
+        ...     "identifier": "my_dataset",
+        ...     "title": "My Dataset",
+        ...     "description": "My dataset description",
+        ...     "category": "Finance",
+        ...     "frequency": "Daily",
+        ...     "is_restricted": False
+        ...     }'
+        >>> dataset = fusion.dataset("my_dataset").from_object(dataset_json)
+        >>> dataset.create(catalog="my_catalog")
+
+        From a CSV file:
+
+        >>> from fusion import Fusion
+        >>> fusion = Fusion()
+        >>> dataset = fusion.dataset("my_dataset").from_object("path/to/dataset.csv")
+        >>> dataset.create(catalog="my_catalog")
+
+        From a pandas Series:
+
+        >>> from fusion import Fusion
+        >>> fusion = Fusion()
+        >>> dataset_series = pd.Series({
+        ...     "identifier": "my_dataset",
+        ...     "title": "My Dataset",
+        ...     "description": "My dataset description",
+        ...     "category": "Finance",
+        ...     "frequency": "Daily",
+        ...     "is_restricted": False
+        ...     })
+        >>> dataset = fusion.dataset("my_dataset").from_object(dataset_series)
+
+        From existing dataset in a catalog:
+
+        >>> from fusion import Fusion
+        >>> fusion = Fusion()
+        >>> dataset = fusion.dataset("my_dataset").from_catalog(catalog="my_catalog")
+        >>> dataset.identifier = "my_new_dataset"
+        >>> dataset.create(catalog="my_catalog")
+
+    """
+    client = self._use_client(client)
+    catalog = client._use_catalog(catalog)
+
+    self.created_date = self.created_date if self.created_date else pd.Timestamp("today").strftime("%Y-%m-%d")
+    self.modified_date = self.modified_date if self.modified_date else pd.Timestamp("today").strftime("%Y-%m-%d")
+
+    self.product = [product] if product else self.product
+
+    data = self.to_dict()
+
+    if data.get("report", None) and data["report"]["tier"] == "":
+        raise ValueError("Tier cannot be blank for reports.")
+
+
+    url = f"{client.root_url}catalogs/{catalog}/datasets/{self.identifier}"
+    resp: requests.Response = client.session.post(url, json=data)
+    requests_raise_for_status(resp)
+
+    return resp if return_resp_obj else None
+
+
+
+ +
+ +
+ + +

+ delete(catalog=None, client=None, return_resp_obj=False) + +

+ + +
+ +

Delete a dataset via API from its dataset identifier.

+ + +

Parameters:

+ + + + + + + + + + + + + + + + + + + + + + + + + + + + + +
NameTypeDescriptionDefault
catalog + str | None + +
+

A catalog identifier. Defaults to "common".

+
+
+ None +
client + Fusion + +
+

A Fusion client object. Defaults to the instance's _client. +If instantiated from a Fusion object, then the client is set automatically.

+
+
+ None +
return_resp_obj + bool + +
+

If True then return the response object. Defaults to False.

+
+
+ False +
+ + +

Returns:

+ + + + + + + + + + + + + +
TypeDescription
+ Response | None + +
+

requests.Response | None: The response object from the API call if return_resp_obj is True, otherwise None.

+
+
+
>>> from fusion import Fusion
+>>> fusion = Fusion()
+>>> fusion.dataset("my_dataset").delete(catalog="my_catalog")
+
+ +
+ Source code in py_src/fusion/dataset.py +
634
+635
+636
+637
+638
+639
+640
+641
+642
+643
+644
+645
+646
+647
+648
+649
+650
+651
+652
+653
+654
+655
+656
+657
+658
+659
+660
+661
+662
+663
+664
def delete(
+    self,
+    catalog: str | None = None,
+    client: Fusion | None = None,
+    return_resp_obj: bool = False,
+) -> requests.Response | None:
+    """Delete a dataset via API from its dataset identifier.
+
+    Args:
+        catalog (str | None, optional): A catalog identifier. Defaults to "common".
+        client (Fusion, optional): A Fusion client object. Defaults to the instance's _client.
+            If instantiated from a Fusion object, then the client is set automatically.
+        return_resp_obj (bool, optional): If True then return the response object. Defaults to False.
+
+    Returns:
+        requests.Response | None: The response object from the API call if return_resp_obj is True, otherwise None.
+
+    Examples:
+
+        >>> from fusion import Fusion
+        >>> fusion = Fusion()
+        >>> fusion.dataset("my_dataset").delete(catalog="my_catalog")
+
+    """
+    client = self._use_client(client)
+    catalog = client._use_catalog(catalog)
+
+    url = f"{client.root_url}catalogs/{catalog}/datasets/{self.identifier}"
+    resp: requests.Response = client.session.delete(url)
+    requests_raise_for_status(resp)
+    return resp if return_resp_obj else None
+
+
+
+ +
+ +
+ + +

+ from_catalog(catalog=None, client=None) + +

+ + +
+ +

Instantiate a Dataset object from a Fusion catalog.

+ + +

Parameters:

+ + + + + + + + + + + + + + + + + + + + + + + +
NameTypeDescriptionDefault
catalog + str | None + +
+

Catalog identifer. Defaults to None.

+
+
+ None +
client + Fusion | None + +
+

Fusion session. Defaults to None. +If instantiated from a Fusion object, then the client is set automatically.

+
+
+ None +
+ + +

Returns:

+ + + + + + + + + + + + + +
Name TypeDescription
Dataset + Dataset + +
+

Dataset object.

+
+
+
>>> from fusion import Fusion
+>>> fusion = Fusion()
+>>> dataset = fusion.dataset("my_dataset").from_catalog(catalog="my_catalog")
+
+ +
+ Source code in py_src/fusion/dataset.py +
426
+427
+428
+429
+430
+431
+432
+433
+434
+435
+436
+437
+438
+439
+440
+441
+442
+443
+444
+445
+446
+447
+448
+449
+450
+451
+452
+453
+454
+455
+456
+457
+458
+459
+460
def from_catalog(self, catalog: str | None = None, client: Fusion | None = None) -> Dataset:
+    """Instantiate a Dataset object from a Fusion catalog.
+
+    Args:
+        catalog (str | None, optional): Catalog identifer. Defaults to None.
+        client (Fusion | None, optional): Fusion session. Defaults to None.
+            If instantiated from a Fusion object, then the client is set automatically.
+
+    Returns:
+        Dataset: Dataset object.
+
+    Examples:
+
+        >>> from fusion import Fusion
+        >>> fusion = Fusion()
+        >>> dataset = fusion.dataset("my_dataset").from_catalog(catalog="my_catalog")
+
+    """
+    client = self._use_client(client)
+    catalog = client._use_catalog(catalog)
+    dataset = self.identifier
+    resp = client.session.get(f"{client.root_url}catalogs/{catalog}/datasets")
+    requests_raise_for_status(resp)
+    list_datasets = resp.json()["resources"]
+    dict_ = [dict_ for dict_ in list_datasets if dict_["identifier"] == dataset][0]
+    dataset_obj = self._from_dict(dict_)
+    dataset_obj.client = client
+
+    prod_df = client.list_product_dataset_mapping(catalog=catalog)
+
+    if dataset.lower() in list(prod_df.dataset.str.lower()):
+        product = [prod_df[prod_df["dataset"].str.lower() == dataset.lower()]["product"].iloc[0]]
+        dataset_obj.product = product
+
+    return dataset_obj
+
+
+
+ +
+ +
+ + +

+ from_object(dataset_source) + +

+ + +
+ +

Instantiate a Dataset object from a Dataset object, dictionary, JSON string, path to CSV, or pandas Series.

+ + +

Parameters:

+ + + + + + + + + + + + + + + + + +
NameTypeDescriptionDefault
dataset_source + Dataset | dict[str, Any] | str | Series[Any] + +
+

Dataset metadata source.

+
+
+ required +
+ + +

Raises:

+ + + + + + + + + + + + + +
TypeDescription
+ TypeError + +
+

If the object provided is not a Dataset, dictionary, JSON string, path to CSV file, +or pandas Series.

+
+
+ + +

Returns:

+ + + + + + + + + + + + + +
Name TypeDescription
Dataset + Dataset + +
+

Dataset object.

+
+
+ + +

Examples:

+

Instantiate a Dataset object from a dictionary:

+
>>> from fusion import Fusion
+>>> fusion = Fusion()
+>>> dataset_dict = {
+...     "identifier": "my_dataset",
+...     "title": "My Dataset",
+...     "description": "My dataset description",
+...     "category": "Finance",
+...     "frequency": "Daily",
+...     "is_restricted": False,
+...     "is_raw_data": True,
+...     "maintainer": "J.P. Morgan Fusion",
+...     "source": "J.P. Morgan",
+...     }
+>>> dataset = fusion.dataset("my_dataset").from_object(dataset_dict)
+
+

Instantiate a Dataset object from a JSON string:

+
>>> from fusion import Fusion
+>>> fusion = Fusion()
+>>> dataset_json = '{
+...     "identifier": "my_dataset",
+...     "title": "My Dataset",
+...     "description": "My dataset description",
+...     "category": "Finance",
+...     "frequency": "Daily",
+...     "is_restricted": False,
+...     "is_raw_data": True,
+...     "maintainer": "J.P. Morgan Fusion",
+...     "source": "J.P. Morgan"
+...     }'
+>>> dataset = fusion.dataset("my_dataset").from_object(dataset_json)
+
+

Instantiate a Dataset object from a CSV file:

+
>>> from fusion import Fusion
+>>> fusion = Fusion()
+>>> dataset = fusion.dataset("my_dataset").from_object("path/to/dataset.csv")
+
+

Instantiate a Dataset object from a pandas Series:

+
>>> from fusion import Fusion
+>>> fusion = Fusion()
+>>> dataset_series = pd.Series({
+...     "identifier": "my_dataset",
+...     "title": "My Dataset",
+...     "description": "My dataset description",
+...     "category": "Finance",
+...     "frequency": "Daily",
+...     "is_restricted": False,
+...     "is_raw_data": True,
+...     "maintainer": "J.P. Morgan Fusion",
+...     "source": "J.P. Morgan"
+...     })
+>>> dataset = fusion.dataset("my_dataset").from_object(dataset_series)
+
+ +
+ Source code in py_src/fusion/dataset.py +
333
+334
+335
+336
+337
+338
+339
+340
+341
+342
+343
+344
+345
+346
+347
+348
+349
+350
+351
+352
+353
+354
+355
+356
+357
+358
+359
+360
+361
+362
+363
+364
+365
+366
+367
+368
+369
+370
+371
+372
+373
+374
+375
+376
+377
+378
+379
+380
+381
+382
+383
+384
+385
+386
+387
+388
+389
+390
+391
+392
+393
+394
+395
+396
+397
+398
+399
+400
+401
+402
+403
+404
+405
+406
+407
+408
+409
+410
+411
+412
+413
+414
+415
+416
+417
+418
+419
+420
+421
+422
+423
+424
def from_object(
+    self,
+    dataset_source: Dataset | dict[str, Any] | str | pd.Series[Any],
+) -> Dataset:
+    """Instantiate a Dataset object from a Dataset object, dictionary, JSON string, path to CSV, or pandas Series.
+
+    Args:
+        dataset_source (Dataset | dict[str, Any] | str | pd.Series[Any]): Dataset metadata source.
+
+    Raises:
+        TypeError: If the object provided is not a Dataset, dictionary, JSON string, path to CSV file,
+            or pandas Series.
+
+    Returns:
+        Dataset: Dataset object.
+
+    Examples:
+        Instantiate a Dataset object from a dictionary:
+
+        >>> from fusion import Fusion
+        >>> fusion = Fusion()
+        >>> dataset_dict = {
+        ...     "identifier": "my_dataset",
+        ...     "title": "My Dataset",
+        ...     "description": "My dataset description",
+        ...     "category": "Finance",
+        ...     "frequency": "Daily",
+        ...     "is_restricted": False,
+        ...     "is_raw_data": True,
+        ...     "maintainer": "J.P. Morgan Fusion",
+        ...     "source": "J.P. Morgan",
+        ...     }
+        >>> dataset = fusion.dataset("my_dataset").from_object(dataset_dict)
+
+        Instantiate a Dataset object from a JSON string:
+
+        >>> from fusion import Fusion
+        >>> fusion = Fusion()
+        >>> dataset_json = '{
+        ...     "identifier": "my_dataset",
+        ...     "title": "My Dataset",
+        ...     "description": "My dataset description",
+        ...     "category": "Finance",
+        ...     "frequency": "Daily",
+        ...     "is_restricted": False,
+        ...     "is_raw_data": True,
+        ...     "maintainer": "J.P. Morgan Fusion",
+        ...     "source": "J.P. Morgan"
+        ...     }'
+        >>> dataset = fusion.dataset("my_dataset").from_object(dataset_json)
+
+        Instantiate a Dataset object from a CSV file:
+
+        >>> from fusion import Fusion
+        >>> fusion = Fusion()
+        >>> dataset = fusion.dataset("my_dataset").from_object("path/to/dataset.csv")
+
+        Instantiate a Dataset object from a pandas Series:
+
+        >>> from fusion import Fusion
+        >>> fusion = Fusion()
+        >>> dataset_series = pd.Series({
+        ...     "identifier": "my_dataset",
+        ...     "title": "My Dataset",
+        ...     "description": "My dataset description",
+        ...     "category": "Finance",
+        ...     "frequency": "Daily",
+        ...     "is_restricted": False,
+        ...     "is_raw_data": True,
+        ...     "maintainer": "J.P. Morgan Fusion",
+        ...     "source": "J.P. Morgan"
+        ...     })
+        >>> dataset = fusion.dataset("my_dataset").from_object(dataset_series)
+
+    """
+    if isinstance(dataset_source, Dataset):
+        dataset = dataset_source
+    elif isinstance(dataset_source, dict):
+        dataset = self._from_dict(dataset_source)
+    elif isinstance(dataset_source, str):
+        if _is_json(dataset_source):
+            dataset = self._from_dict(js.loads(dataset_source))
+        else:
+            dataset = self._from_csv(dataset_source)
+    elif isinstance(dataset_source, pd.Series):
+        dataset = self._from_series(dataset_source)
+    else:
+        raise TypeError(f"Could not resolve the object provided: {dataset_source}")
+
+    dataset.client = self._client
+
+    return dataset
+
+
+
+ +
+ +
+ + +

+ remove_from_product(product, catalog=None, client=None, return_resp_obj=False) + +

+ + +
+ +

Delete dataset to product mapping.

+ + +

Parameters:

+ + + + + + + + + + + + + + + + + + + + + + + + + + + + + +
NameTypeDescriptionDefault
product + str + +
+

A product identifier.

+
+
+ required +
catalog + str | None + +
+

A catalog identifier. Defaults to "common".

+
+
+ None +
client + Fusion | None + +
+

A Fusion client object. Defaults to the instance's _client. +If instantiated from a Fusion object, then the client is set automatically.

+
+
+ None +
+
>>> from fusion import Fusion
+>>> fusion = Fusion()
+>>> fusion.dataset("my_dataset").remove_from_product(product="MY_PRODUCT", catalog="my_catalog")
+
+ +
+ Source code in py_src/fusion/dataset.py +
765
+766
+767
+768
+769
+770
+771
+772
+773
+774
+775
+776
+777
+778
+779
+780
+781
+782
+783
+784
+785
+786
+787
+788
+789
+790
+791
+792
+793
+794
+795
def remove_from_product(
+    self,
+    product: str,
+    catalog: str | None = None,
+    client: Fusion | None = None,
+    return_resp_obj: bool = False,
+) -> requests.Response | None:
+    """Delete dataset to product mapping.
+
+    Args:
+        product (str): A product identifier.
+        catalog (str | None, optional): A catalog identifier. Defaults to "common".
+        client (Fusion | None, optional):  A Fusion client object. Defaults to the instance's _client.
+            If instantiated from a Fusion object, then the client is set automatically.
+
+    Examples:
+
+        >>> from fusion import Fusion
+        >>> fusion = Fusion()
+        >>> fusion.dataset("my_dataset").remove_from_product(product="MY_PRODUCT", catalog="my_catalog")
+
+    """
+    client = self._use_client(client)
+    catalog = client._use_catalog(catalog)
+    dataset = self.identifier
+    url = f"{client.root_url}catalogs/{catalog}/productDatasets/{product}/{dataset}"
+    resp = client.session.delete(url=url)
+
+    requests_raise_for_status(resp)
+
+    return resp if return_resp_obj else None
+
+
+
+ +
+ +
+ + +

+ to_dict() + +

+ + +
+ +

Convert the Dataset instance to a dictionary.

+ + +

Returns:

+ + + + + + + + + + + + + +
TypeDescription
+ dict[str, Any] + +
+

dict[str, Any]: Dataset metadata as a dictionary.

+
+
+ + +

Examples:

+
>>> from fusion import Fusion
+>>> fusion = Fusion()
+>>> dataset = fusion.dataset("my_dataset")
+>>> dataset_dict = dataset.to_dict()
+
+ +
+ Source code in py_src/fusion/dataset.py +
462
+463
+464
+465
+466
+467
+468
+469
+470
+471
+472
+473
+474
+475
+476
+477
def to_dict(self) -> dict[str, Any]:
+    """Convert the Dataset instance to a dictionary.
+
+    Returns:
+        dict[str, Any]: Dataset metadata as a dictionary.
+
+    Examples:
+        >>> from fusion import Fusion
+        >>> fusion = Fusion()
+        >>> dataset = fusion.dataset("my_dataset")
+        >>> dataset_dict = dataset.to_dict()
+
+    """
+    dataset_dict = {snake_to_camel(k): v for k, v in self.__dict__.items() if not k.startswith("_")}
+
+    return dataset_dict
+
+
+
+ +
+ +
+ + +

+ update(catalog=None, client=None, return_resp_obj=False) + +

+ + +
+ +

Updates a dataset via API from dataset object.

+ + +

Parameters:

+ + + + + + + + + + + + + + + + + + + + + + + + + + + + + +
NameTypeDescriptionDefault
catalog + str | None + +
+

A catalog identifier. Defaults to "common".

+
+
+ None +
client + Fusion + +
+

A Fusion client object. Defaults to the instance's _client. +If instantiated from a Fusion object, then the client is set automatically.

+
+
+ None +
return_resp_obj + bool + +
+

If True then return the response object. Defaults to False.

+
+
+ False +
+ + +

Returns:

+ + + + + + + + + + + + + +
TypeDescription
+ Response | None + +
+

requests.Response | None: The response object from the API call if return_resp_obj is True, otherwise None.

+
+
+
>>> from fusion import Fusion
+>>> fusion = Fusion()
+>>> dataset = fusion.dataset("my_dataset").from_catalog(catalog="my_catalog")
+>>> dataset.title = "My Updated Dataset"
+>>> dataset.update(catalog="my_catalog")
+
+ +
+ Source code in py_src/fusion/dataset.py +
595
+596
+597
+598
+599
+600
+601
+602
+603
+604
+605
+606
+607
+608
+609
+610
+611
+612
+613
+614
+615
+616
+617
+618
+619
+620
+621
+622
+623
+624
+625
+626
+627
+628
+629
+630
+631
+632
def update(
+    self,
+    catalog: str | None = None,
+    client: Fusion | None = None,
+    return_resp_obj: bool = False,
+) -> requests.Response | None:
+    """Updates a dataset via API from dataset object.
+
+    Args:
+        catalog (str | None, optional): A catalog identifier. Defaults to "common".
+        client (Fusion, optional): A Fusion client object. Defaults to the instance's _client.
+            If instantiated from a Fusion object, then the client is set automatically.
+        return_resp_obj (bool, optional): If True then return the response object. Defaults to False.
+
+    Returns:
+        requests.Response | None: The response object from the API call if return_resp_obj is True, otherwise None.
+
+    Examples:
+
+        >>> from fusion import Fusion
+        >>> fusion = Fusion()
+        >>> dataset = fusion.dataset("my_dataset").from_catalog(catalog="my_catalog")
+        >>> dataset.title = "My Updated Dataset"
+        >>> dataset.update(catalog="my_catalog")
+
+    """
+    client = self._use_client(client)
+    catalog = client._use_catalog(catalog)
+
+    self.created_date = self.created_date if self.created_date else pd.Timestamp("today").strftime("%Y-%m-%d")
+    self.modified_date = self.modified_date if self.modified_date else pd.Timestamp("today").strftime("%Y-%m-%d")
+
+    data = self.to_dict()
+
+    url = f"{client.root_url}catalogs/{catalog}/datasets/{self.identifier}"
+    resp: requests.Response = client.session.put(url, json=data)
+    requests_raise_for_status(resp)
+    return resp if return_resp_obj else None
+
+
+
+ +
+ + + +
+ +
+ +
+ + + + +
+ +
+ +
+ +
+ + + + +
+ +

Fusion Product class and functions.

+ + + +
+ + + + + + + + +
+ + + +

+ Attribute + + + + dataclass + + +

+ + +
+ + +

Fusion Attribute class for managing attributes metadata in a Fusion catalog.

+ + +

Attributes:

+ + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + +
NameTypeDescription
identifier + str + +
+

The unique identifier for the attribute.

+
+
index + int + +
+

Attribute index.

+
+
data_type + str | Types + +
+

Datatype of attribute. Defaults to "String".

+
+
title + str + +
+

Attribute title. If not provided, defaults to identifier.

+
+
description + str + +
+

Attribute description. If not provided, defaults to identifier.

+
+
is_dataset_key + bool + +
+

Flag for primary keys. Defaults to False.

+
+
source + str | None + +
+

Name of data vendor which provided the data. Defaults to None.

+
+
source_field_id + str | None + +
+

Original identifier of attribute, if attribute has been renamed. +If not provided, defaults to identifier.

+
+
is_internal_dataset_key + bool | None + +
+

Flag for internal primary keys. Defaults to None.

+
+
is_externally_visible + bool | None + +
+

Flag for externally visible attributes. Defaults to True.

+
+
unit + Any | None + +
+

Unit of attribute. Defaults to None.

+
+
multiplier + float + +
+

Multiplier for unit. Defaults to 1.0.

+
+
is_propagation_eligible + bool | None + +
+

Flag for propagation eligibility. Defaults to None.

+
+
is_metric + bool | None + +
+

Flag for attributes that are metrics. Defaults to None.

+
+
available_from + str | None + +
+

Date from which the attribute is available. Defaults to None.

+
+
deprecated_from + str | None + +
+

Date from which the attribute is deprecated. Defaults to None.

+
+
term + str + +
+

Term. Defaults to "bizterm1".

+
+
dataset + int | None + +
+

Dataset. Defaults to None.

+
+
attribute_type + str | None + +
+

Attribute type. Defaults to None.

+
+
application_id + str | dict[str, str] | None + +
+

The seal ID of the dataset in string format, +or a dictionary containing 'id' and 'type'. Used for catalog attributes. Defaults to None.

+
+
publisher + str | None + +
+

Publisher of the attribute. Used for catalog attributes. Defaults to None.

+
+
is_key_data_element + bool | None + +
+

Flag for key data elements. Used for attributes registered to +Reports. Defaults to None.

+
+
_client + Fusion | None + +
+

Fusion client object. Defaults to None.

+
+
+ +
+ Source code in py_src/fusion/attributes.py +
 28
+ 29
+ 30
+ 31
+ 32
+ 33
+ 34
+ 35
+ 36
+ 37
+ 38
+ 39
+ 40
+ 41
+ 42
+ 43
+ 44
+ 45
+ 46
+ 47
+ 48
+ 49
+ 50
+ 51
+ 52
+ 53
+ 54
+ 55
+ 56
+ 57
+ 58
+ 59
+ 60
+ 61
+ 62
+ 63
+ 64
+ 65
+ 66
+ 67
+ 68
+ 69
+ 70
+ 71
+ 72
+ 73
+ 74
+ 75
+ 76
+ 77
+ 78
+ 79
+ 80
+ 81
+ 82
+ 83
+ 84
+ 85
+ 86
+ 87
+ 88
+ 89
+ 90
+ 91
+ 92
+ 93
+ 94
+ 95
+ 96
+ 97
+ 98
+ 99
+100
+101
+102
+103
+104
+105
+106
+107
+108
+109
+110
+111
+112
+113
+114
+115
+116
+117
+118
+119
+120
+121
+122
+123
+124
+125
+126
+127
+128
+129
+130
+131
+132
+133
+134
+135
+136
+137
+138
+139
+140
+141
+142
+143
+144
+145
+146
+147
+148
+149
+150
+151
+152
+153
+154
+155
+156
+157
+158
+159
+160
+161
+162
+163
+164
+165
+166
+167
+168
+169
+170
+171
+172
+173
+174
+175
+176
+177
+178
+179
+180
+181
+182
+183
+184
+185
+186
+187
+188
+189
+190
+191
+192
+193
+194
+195
+196
+197
+198
+199
+200
+201
+202
+203
+204
+205
+206
+207
+208
+209
+210
+211
+212
+213
+214
+215
+216
+217
+218
+219
+220
+221
+222
+223
+224
+225
+226
+227
+228
+229
+230
+231
+232
+233
+234
+235
+236
+237
+238
+239
+240
+241
+242
+243
+244
+245
+246
+247
+248
+249
+250
+251
+252
+253
+254
+255
+256
+257
+258
+259
+260
+261
+262
+263
+264
+265
+266
+267
+268
+269
+270
+271
+272
+273
+274
+275
+276
+277
+278
+279
+280
+281
+282
+283
+284
+285
+286
+287
+288
+289
+290
+291
+292
+293
+294
+295
+296
+297
+298
+299
+300
+301
+302
+303
+304
+305
+306
+307
+308
+309
+310
+311
+312
+313
+314
+315
+316
+317
+318
+319
+320
+321
+322
+323
+324
+325
+326
+327
+328
+329
+330
+331
+332
+333
+334
+335
+336
+337
+338
+339
+340
+341
+342
+343
+344
+345
+346
+347
+348
+349
+350
+351
+352
+353
+354
+355
+356
+357
+358
+359
+360
+361
+362
+363
+364
+365
+366
+367
+368
+369
+370
+371
+372
+373
+374
+375
+376
+377
+378
+379
+380
+381
+382
+383
+384
+385
+386
+387
+388
+389
+390
+391
+392
+393
+394
+395
+396
+397
+398
+399
+400
+401
+402
+403
+404
+405
+406
+407
+408
+409
+410
+411
+412
+413
+414
+415
+416
+417
+418
+419
+420
+421
+422
+423
+424
+425
+426
+427
+428
+429
+430
+431
+432
+433
+434
+435
+436
+437
+438
+439
+440
+441
+442
+443
+444
+445
+446
+447
+448
+449
+450
+451
+452
+453
+454
+455
+456
+457
+458
+459
+460
+461
+462
+463
+464
+465
+466
+467
+468
+469
+470
+471
+472
+473
+474
+475
+476
+477
+478
+479
+480
+481
+482
+483
+484
+485
+486
+487
+488
+489
+490
+491
+492
+493
+494
+495
+496
+497
+498
+499
+500
+501
@dataclass
+class Attribute(metaclass=CamelCaseMeta):
+    """Fusion Attribute class for managing attributes metadata in a Fusion catalog.
+
+    Attributes:
+        identifier (str): The unique identifier for the attribute.
+        index (int): Attribute index.
+        data_type (str | Types, optional): Datatype of attribute. Defaults to "String".
+        title (str, optional): Attribute title. If not provided, defaults to identifier.
+        description (str, optional): Attribute description. If not provided, defaults to identifier.
+        is_dataset_key (bool, optional): Flag for primary keys. Defaults to False.
+        source (str | None, optional): Name of data vendor which provided the data. Defaults to None.
+        source_field_id (str | None, optional): Original identifier of attribute, if attribute has been renamed.
+            If not provided, defaults to identifier.
+        is_internal_dataset_key (bool | None, optional): Flag for internal primary keys. Defaults to None.
+        is_externally_visible (bool | None, optional): Flag for externally visible attributes. Defaults to True.
+        unit (Any | None, optional): Unit of attribute. Defaults to None.
+        multiplier (float, optional): Multiplier for unit. Defaults to 1.0.
+        is_propagation_eligible (bool | None, optional): Flag for propagation eligibility. Defaults to None.
+        is_metric (bool | None, optional): Flag for attributes that are metrics. Defaults to None.
+        available_from (str | None, optional): Date from which the attribute is available. Defaults to None.
+        deprecated_from (str | None, optional): Date from which the attribute is deprecated. Defaults to None.
+        term (str, optional): Term. Defaults to "bizterm1".
+        dataset (int | None, optional): Dataset. Defaults to None.
+        attribute_type (str | None, optional): Attribute type. Defaults to None.
+        application_id (str | dict[str, str] | None, optional): The seal ID of the dataset in string format,
+            or a dictionary containing 'id' and 'type'. Used for catalog attributes. Defaults to None.
+        publisher (str | None, optional): Publisher of the attribute. Used for catalog attributes. Defaults to None.
+        is_key_data_element (bool | None, optional): Flag for key data elements. Used for attributes registered to
+            Reports. Defaults to None.
+        _client (Fusion | None, optional): Fusion client object. Defaults to None.
+
+    """
+
+    identifier: str
+    index: int
+    data_type: Types = cast(Types, Types.String)
+    title: str = ""
+    description: str = ""
+    is_dataset_key: bool = False
+    source: str | None = None
+    source_field_id: str | None = None
+    is_internal_dataset_key: bool | None = None
+    is_externally_visible: bool | None = True
+    unit: Any | None = None
+    multiplier: float = 1.0
+    is_propagation_eligible: bool | None = None
+    is_metric: bool | None = None
+    available_from: str | None = None
+    deprecated_from: str | None = None
+    term: str = "bizterm1"
+    dataset: int | None = None
+    attribute_type: str | None = None
+    application_id: str | dict[str, str] | None = None
+    publisher: str | None = None
+    is_key_data_element: bool | None = None
+
+    _client: Fusion | None = field(init=False, repr=False, compare=False, default=None)
+
+    def __str__(self: Attribute) -> str:
+        """Format string representation."""
+        attrs = {k: v for k, v in self.__dict__.items() if not k.startswith("_")}
+        return f"Attribute(\n" + ",\n ".join(f"{k}={v!r}" for k, v in attrs.items()) + "\n)"
+
+    def __repr__(self: Attribute) -> str:
+        """Format object representation."""
+        s = ", ".join(f"{getattr(self, f.name)!r}" for f in fields(self) if not f.name.startswith("_"))
+        return "(" + s + ")"
+
+    def __post_init__(self: Attribute) -> None:
+        """Format Attribute metadata fields after object initialization."""
+        self.is_dataset_key = make_bool(self.is_dataset_key)
+        self.identifier = tidy_string(self.identifier).lower().replace(" ", "_")
+        self.title = tidy_string(self.title) if self.title != "" else self.identifier.replace("_", " ").title()
+        self.description = tidy_string(self.description) if self.description and self.description != "" else self.title
+        self.source_field_id = (
+            tidy_string(self.source_field_id).lower().replace(" ", "_") if self.source_field_id else self.identifier
+        )
+        self.available_from = convert_date_format(self.available_from) if self.available_from else None
+        self.deprecated_from = convert_date_format(self.deprecated_from) if self.deprecated_from else None
+        self.data_type = Types[str(self.data_type).strip().rsplit(".", maxsplit=1)[-1].title()]
+        self.application_id = (
+            {"id": str(self.application_id), "type": "Application (SEAL)"}
+            if isinstance(self.application_id, str)
+            else self.application_id
+        )
+
+    def __getattr__(self, name: str) -> Any:
+        # Redirect attribute access to the snake_case version
+        snake_name = camel_to_snake(name)
+        if snake_name in self.__dict__:
+            return self.__dict__[snake_name]
+        raise AttributeError(f"'{type(self).__name__}' object has no attribute '{name}'")
+
+    def __setattr__(self, name: str, value: Any) -> None:
+        if name == "client":
+            # Use the property setter for client
+            object.__setattr__(self, name, value)
+        else:
+            snake_name = camel_to_snake(name)
+            self.__dict__[snake_name] = value
+
+    @property
+    def client(self) -> Fusion | None:
+        """Return the client."""
+        return self._client
+
+    @client.setter
+    def client(self, client: Fusion | None) -> None:
+        """Set the client for the Dataset. Set automatically, if the Dataset is instantiated from a Fusion object.
+
+        Args:
+            client (Any): Fusion client object.
+
+        Examples:
+            >>> from fusion import Fusion
+            >>> fusion = Fusion()
+            >>> attribute = fusion.attribute(identifier="my_attribute", index=0)
+            >>> attribute.client = fusion
+
+        """
+        self._client = client
+
+    def _use_client(self, client: Fusion | None) -> Fusion:
+        """Determine client."""
+
+        res = self._client if client is None else client
+        if res is None:
+            raise ValueError("A Fusion client object is required.")
+        return res
+
+    @classmethod
+    def _from_series(
+        cls: type[Attribute],
+        series: pd.Series[Any],
+    ) -> Attribute:
+        """Instantiate an Attribute object from a pandas Series.
+
+        Args:
+            series (pd.Series[Any]): Attribute metadata as a pandas Series.
+
+        Returns:
+            Attribute: Attribute object.
+
+        Examples:
+
+            >>> from fusion import Fusion
+            >>> fusion = Fusion()
+            >>> import pandas as pd
+            >>> series = pd.Series({
+            ...     "identifier": "my_attribute",
+            ...     "index": 0,
+            ...     "data_type": "String",
+            ...     "title": "My Attribute",
+            ...     "description": "My attribute description"
+            ... })
+            >>> attribute = fusion.attribute(identifier="my_attribute", index=0)._from_series(series)
+
+        """
+        series = series.rename(lambda x: x.replace(" ", "").replace("_", "").lower()).replace(
+            to_replace=np.nan, value=None
+        )
+        data_type = series.get("datatype", cast(Types, Types.String))
+        data_type = series.get("type", cast(Types, Types.String)) if data_type is None else data_type
+        source = series.get("source", None)
+        source = source.strip() if isinstance(source, str) else source
+
+        is_propagation_eligible = series.get("ispropagationeligible", None)
+        is_propagation_eligible = (
+            make_bool(is_propagation_eligible) if is_propagation_eligible is not None else is_propagation_eligible
+        )
+        is_metric = series.get("ismetric", None)
+        is_metric = make_bool(is_metric) if is_metric is not None else is_metric
+        is_internal_dataset_key = series.get("isinternaldatasetkey", None)
+        is_internal_dataset_key = (
+            make_bool(is_internal_dataset_key) if is_internal_dataset_key is not None else is_internal_dataset_key
+        )
+        is_externally_visible = series.get("isexternallyvisible", True)
+        is_externally_visible = (
+            make_bool(is_externally_visible) if is_externally_visible is not None else is_externally_visible
+        )
+
+        return cls(
+            identifier=series.get("identifier", "").strip(),
+            index=series.get("index", -1),
+            data_type=Types[data_type.strip().split(".")[-1].title()],
+            title=series.get("title", ""),
+            description=series.get("description", ""),
+            is_dataset_key=series.get("isdatasetkey", False),
+            source=source,
+            source_field_id=series.get("sourcefieldid", None),
+            is_internal_dataset_key=is_internal_dataset_key,
+            is_externally_visible=is_externally_visible,
+            unit=series.get("unit", None),
+            multiplier=series.get("multiplier", 1.0),
+            is_propagation_eligible=is_propagation_eligible,
+            is_metric=is_metric,
+            available_from=series.get("availablefrom", None),
+            deprecated_from=series.get("deprecatedfrom", None),
+            term=series.get("term", "bizterm1"),
+            dataset=series.get("dataset", None),
+            attribute_type=series.get("attributetype", None),
+        )
+
+    @classmethod
+    def _from_dict(cls: type[Attribute], data: dict[str, Any]) -> Attribute:
+        """Instantiate an Attribute object from a dictionary.
+
+        Args:
+            data (dict[str, Any]): Attribute metadata as a dictionary.
+
+        Returns:
+            Attribute: Attribute object.
+
+        Examples:
+
+            >>> from fusion import Fusion
+            >>> fusion = Fusion()
+            >>> data = {
+            ...     "identifier": "my_attribute",
+            ...     "index": 0,
+            ...     "data_type": "String",
+            ...     "title": "My Attribute",
+            ...     "description": "My attribute description"
+            ... }
+            >>> attribute = fusion.attribute(identifier="my_attribute", index=0)._from_dict(data)
+
+        """
+        keys = [f.name for f in fields(cls)]
+        data = {camel_to_snake(k): v for k, v in data.items()}
+        data = {k: (None if pd.isna(v) else v) for k, v in data.items() if k in keys}
+        if "data_type" in data:
+            data["data_type"] = Types[data["data_type"].strip().rsplit(".", maxsplit=1)[-1].title()]
+        return cls(**data)
+
+    def from_object(
+        self,
+        attribute_source: Attribute | dict[str, Any] | pd.Series[Any],
+    ) -> Attribute:
+        """Instatiate an Attribute from an Attribute object, dictionary or pandas Series.
+
+        Args:
+            attribute_source (Attribute | dict[str, Any] | pd.Series[Any]): Attribute metadata source.
+
+        Raises:
+            TypeError: If the object provided is not an Attribute object, dictionary or pandas Series.
+
+        Returns:
+            Attribute: Attribute object.
+
+        Examples:
+
+            Instatiating a Attribute from a dictionary:
+
+            >>> from fusion import Fusion
+            >>> fusion = Fusion()
+            >>> data = {
+            ...     "identifier": "my_attribute",
+            ...     "index": 0,
+            ...     "data_type": "String",
+            ...     "title": "My Attribute",
+            ...     "description": "My attribute description"
+            ... }
+            >>> attribute = fusion.attribute(identifier="my_attribute", index=0).from_object(data)
+
+            Instatiating a Attribute from a pandas Series:
+
+            >>> from fusion import Fusion
+            >>> fusion = Fusion()
+            >>> import pandas as pd
+            >>> series = pd.Series({
+            ...     "identifier": "my_attribute",
+            ...     "index": 0,
+            ...     "data_type": "String",
+            ...     "title": "My Attribute",
+            ...     "description": "My attribute description"
+            ... })
+            >>> attribute = fusion.attribute(identifier="my_attribute", index=0).from_object(series)
+
+        """
+        if isinstance(attribute_source, Attribute):
+            attribute = attribute_source
+        elif isinstance(attribute_source, dict):
+            attribute = self._from_dict(attribute_source)
+        elif isinstance(attribute_source, pd.Series):
+            attribute = self._from_series(attribute_source)
+        else:
+            raise ValueError(f"Could not resolve the object provided: {attribute_source}")
+        attribute.client = self._client
+        return attribute
+
+    def to_dict(self: Attribute) -> dict[str, Any]:
+        """Convert object to dictionary.
+
+        Returns:
+            dict[str, Any]: Attribute metadata as a dictionary.
+
+        Examples:
+
+            >>> from fusion import Fusion
+            >>> fusion = Fusion()
+            >>> attribute = fusion.attribute(identifier="my_attribute", index=0)
+            >>> attribute_dict = attribute.to_dict()
+
+        """
+        result = {snake_to_camel(k): v for k, v in self.__dict__.items() if not k.startswith("_")}
+        result["unit"] = str(self.unit) if self.unit is not None else None
+        result["dataType"] = self.data_type.name
+        if "isKeyDataElement" in result:
+            result["isCriticalDataElement"] = result.pop("isKeyDataElement")
+        return result
+
+    def create(
+        self,
+        dataset: str,
+        catalog: str | None = None,
+        client: Fusion | None = None,
+        return_resp_obj: bool = False,
+    ) -> requests.Response | None:
+        """Upload a new attribute to a Fusion catalog.
+
+        Args:
+            dataset (str): Dataset identifier.
+            client (Fusion, optional): A Fusion client object. Defaults to the instance's _client.
+                If instantiated from a Fusion object, then the client is set automatically.
+            catalog (str, optional): A catalog identifier. Defaults to None.
+            return_resp_obj (bool, optional): If True then return the response object. Defaults to False.
+
+        Returns:
+            requests.Response | None: The response object from the API call if return_resp_obj is True, otherwise None.
+
+        Examples:
+
+            Individually, from scratch:
+
+            >>> from fusion import Fusion
+            >>> fusion = Fusion()
+            >>> attribute0 = fusion.attribute(identifier="my_attribute_0", index=0)
+            >>> attribute.create(dataset="my_dataset", catalog="my_catalog")
+            >>> attribute1 = fusion.attribute(identifier="my_attribute_1", index=1)
+            >>> attribute.create(dataset="my_dataset", catalog="my_catalog")
+
+            Individually, from a dictionary:
+
+            >>> from fusion import Fusion
+            >>> fusion = Fusion()
+            >>> data = {
+            ...     "identifier": "my_attribute",
+            ...     "index": 0,
+            ...     "data_type": "String",
+            ...     "title": "My Attribute",
+            ...     "description": "My attribute description"
+            ...    }
+            >>> attribute = fusion.attribute(identifier="my_attribute", index=0).from_object(data)
+            >>> attribute.create(dataset="my_dataset", catalog="my_catalog")
+
+            Individually, from a pandas Series:
+
+            >>> from fusion import Fusion
+            >>> fusion = Fusion()
+            >>> import pandas as pd
+            >>> series = pd.Series({
+            ...     "identifier": "my_attribute",
+            ...     "index": 0,
+            ...     "data_type": "String",
+            ...     "title": "My Attribute",
+            ...     "description": "My attribute description"
+            ... })
+            >>> attribute = fusion.attribute(identifier="my_attribute", index=0).from_object(series)
+            >>> attribute.create(dataset="my_dataset", catalog="my_catalog")
+
+        """
+        client = self._use_client(client)
+        catalog = client._use_catalog(catalog)
+        data = self.to_dict()
+        url = f"{client.root_url}catalogs/{catalog}/datasets/{dataset}/attributes/{self.identifier}"
+        resp = client.session.put(url, json=data)
+        requests_raise_for_status(resp)
+        return resp if return_resp_obj else None
+
+    def delete(
+        self,
+        dataset: str,
+        catalog: str | None = None,
+        client: Fusion | None = None,
+        return_resp_obj: bool = False,
+    ) -> requests.Response | None:
+        """Delete an Attribute from a Fusion catalog.
+
+        Args:
+            dataset (str): Dataset identifier.
+            client (Fusion, optional): A Fusion client object. Defaults to the instance's _client.
+            catalog (str, optional): A catalog identifier. Defaults to None.
+            return_resp_obj (bool, optional): If True then return the response object. Defaults to False.
+
+        Returns:
+            requests.Response | None: The response object from the API call if return_resp_obj is True, otherwise None.
+
+        Examples:
+
+            >>> from fusion import Fusion
+            >>> fusion = Fusion()
+            >>> fusion.attribute(identifier="my_attribute", index=0).delete(dataset="my_dataset", catalog="my_catalog")
+
+        """
+        client = self._use_client(client)
+        catalog = client._use_catalog(catalog)
+        url = f"{client.root_url}catalogs/{catalog}/datasets/{dataset}/attributes/{self.identifier}"
+        resp = client.session.delete(url)
+        requests_raise_for_status(resp)
+        return resp if return_resp_obj else None
+
+    def set_lineage(
+        self,
+        attributes: list[Attribute],
+        catalog: str | None = None,
+        client: Fusion | None = None,
+        return_resp_obj: bool = False,
+    ) -> requests.Response | None:
+        """Map an attribute to existing registered attributes in a Fusion catalog. Attributes from an output data flow
+            can be mapped to existing registered input data flow attributes. This supports the case in which the
+            generating application and receiving application store their attributes with different names.
+
+        Args:
+            attributes (str): List of Attribute objects to establish upstream lineage from.
+            client (Fusion, optional): A Fusion client object. Defaults to the instance's _client.
+                If instantiated from a Fusion object, then the client is set automatically.
+            catalog (str, optional): A catalog identifier. Defaults to None.
+            return_resp_obj (bool, optional): If True then return the response object. Defaults to False.
+
+        Returns:
+            requests.Response | None: The response object from the API call if return_resp_obj is True, otherwise None.
+
+        Examples:
+
+            >>> from fusion import Fusion
+            >>> fusion = Fusion()
+            >>> my_attr1 = fusion.attribute(identifier="my_attribute1", index=0, application_id="12345")
+            >>> my_attr2 = fusion.attribute(identifier="my_attribute2", index=0, application_id="12345")
+            >>> my_attr3 = fusion.attribute(identifier="my_attribute3", index=0, application_id="12345")
+            >>> attrs = [my_attr1, my_attr2]
+            >>> my_attr3.set_lineage(attributes=attrs, catalog="my_catalog")
+
+        """
+        client = self._use_client(client)
+        catalog = client._use_catalog(catalog)
+
+        if self.application_id is None:
+            raise ValueError("The 'application_id' attribute is required for setting lineage.")
+        target_attributes = []
+        for attribute in attributes:
+            if attribute.application_id is None:
+                raise ValueError(f"The 'application_id' attribute is required for setting lineage.")
+            attr_dict = {
+                    "catalog": catalog,
+                    "attribute": attribute.identifier,
+                    "applicationId": attribute.application_id
+                }
+            target_attributes.append(attr_dict)
+
+        url = f"{client.root_url}catalogs/{catalog}/attributes/lineage"
+        data = [
+            {
+                "source": {
+                    "catalog": catalog,
+                    "attribute": self.identifier,
+                    "applicationId": self.application_id
+            },
+            "targets": target_attributes
+        }
+        ]
+        resp = client.session.post(url, json=data)
+        requests_raise_for_status(resp)
+        return resp if return_resp_obj else None
+
+
+ + + +
+ + + + + + + +
+ + + +

+ client: Fusion | None + + + property + writable + + +

+ + +
+ +

Return the client.

+
+ +
+ + + +
+ + +

+ __post_init__() + +

+ + +
+ +

Format Attribute metadata fields after object initialization.

+ +
+ Source code in py_src/fusion/attributes.py +
 97
+ 98
+ 99
+100
+101
+102
+103
+104
+105
+106
+107
+108
+109
+110
+111
+112
+113
def __post_init__(self: Attribute) -> None:
+    """Format Attribute metadata fields after object initialization."""
+    self.is_dataset_key = make_bool(self.is_dataset_key)
+    self.identifier = tidy_string(self.identifier).lower().replace(" ", "_")
+    self.title = tidy_string(self.title) if self.title != "" else self.identifier.replace("_", " ").title()
+    self.description = tidy_string(self.description) if self.description and self.description != "" else self.title
+    self.source_field_id = (
+        tidy_string(self.source_field_id).lower().replace(" ", "_") if self.source_field_id else self.identifier
+    )
+    self.available_from = convert_date_format(self.available_from) if self.available_from else None
+    self.deprecated_from = convert_date_format(self.deprecated_from) if self.deprecated_from else None
+    self.data_type = Types[str(self.data_type).strip().rsplit(".", maxsplit=1)[-1].title()]
+    self.application_id = (
+        {"id": str(self.application_id), "type": "Application (SEAL)"}
+        if isinstance(self.application_id, str)
+        else self.application_id
+    )
+
+
+
+ +
+ +
+ + +

+ __repr__() + +

+ + +
+ +

Format object representation.

+ +
+ Source code in py_src/fusion/attributes.py +
92
+93
+94
+95
def __repr__(self: Attribute) -> str:
+    """Format object representation."""
+    s = ", ".join(f"{getattr(self, f.name)!r}" for f in fields(self) if not f.name.startswith("_"))
+    return "(" + s + ")"
+
+
+
+ +
+ +
+ + +

+ __str__() + +

+ + +
+ +

Format string representation.

+ +
+ Source code in py_src/fusion/attributes.py +
87
+88
+89
+90
def __str__(self: Attribute) -> str:
+    """Format string representation."""
+    attrs = {k: v for k, v in self.__dict__.items() if not k.startswith("_")}
+    return f"Attribute(\n" + ",\n ".join(f"{k}={v!r}" for k, v in attrs.items()) + "\n)"
+
+
+
+ +
+ +
+ + +

+ create(dataset, catalog=None, client=None, return_resp_obj=False) + +

+ + +
+ +

Upload a new attribute to a Fusion catalog.

+ + +

Parameters:

+ + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + +
NameTypeDescriptionDefault
dataset + str + +
+

Dataset identifier.

+
+
+ required +
client + Fusion + +
+

A Fusion client object. Defaults to the instance's _client. +If instantiated from a Fusion object, then the client is set automatically.

+
+
+ None +
catalog + str + +
+

A catalog identifier. Defaults to None.

+
+
+ None +
return_resp_obj + bool + +
+

If True then return the response object. Defaults to False.

+
+
+ False +
+ + +

Returns:

+ + + + + + + + + + + + + +
TypeDescription
+ Response | None + +
+

requests.Response | None: The response object from the API call if return_resp_obj is True, otherwise None.

+
+
+
Individually, from scratch:
+
+>>> from fusion import Fusion
+>>> fusion = Fusion()
+>>> attribute0 = fusion.attribute(identifier="my_attribute_0", index=0)
+>>> attribute.create(dataset="my_dataset", catalog="my_catalog")
+>>> attribute1 = fusion.attribute(identifier="my_attribute_1", index=1)
+>>> attribute.create(dataset="my_dataset", catalog="my_catalog")
+
+Individually, from a dictionary:
+
+>>> from fusion import Fusion
+>>> fusion = Fusion()
+>>> data = {
+...     "identifier": "my_attribute",
+...     "index": 0,
+...     "data_type": "String",
+...     "title": "My Attribute",
+...     "description": "My attribute description"
+...    }
+>>> attribute = fusion.attribute(identifier="my_attribute", index=0).from_object(data)
+>>> attribute.create(dataset="my_dataset", catalog="my_catalog")
+
+Individually, from a pandas Series:
+
+>>> from fusion import Fusion
+>>> fusion = Fusion()
+>>> import pandas as pd
+>>> series = pd.Series({
+...     "identifier": "my_attribute",
+...     "index": 0,
+...     "data_type": "String",
+...     "title": "My Attribute",
+...     "description": "My attribute description"
+... })
+>>> attribute = fusion.attribute(identifier="my_attribute", index=0).from_object(series)
+>>> attribute.create(dataset="my_dataset", catalog="my_catalog")
+
+ +
+ Source code in py_src/fusion/attributes.py +
340
+341
+342
+343
+344
+345
+346
+347
+348
+349
+350
+351
+352
+353
+354
+355
+356
+357
+358
+359
+360
+361
+362
+363
+364
+365
+366
+367
+368
+369
+370
+371
+372
+373
+374
+375
+376
+377
+378
+379
+380
+381
+382
+383
+384
+385
+386
+387
+388
+389
+390
+391
+392
+393
+394
+395
+396
+397
+398
+399
+400
+401
+402
+403
+404
+405
+406
def create(
+    self,
+    dataset: str,
+    catalog: str | None = None,
+    client: Fusion | None = None,
+    return_resp_obj: bool = False,
+) -> requests.Response | None:
+    """Upload a new attribute to a Fusion catalog.
+
+    Args:
+        dataset (str): Dataset identifier.
+        client (Fusion, optional): A Fusion client object. Defaults to the instance's _client.
+            If instantiated from a Fusion object, then the client is set automatically.
+        catalog (str, optional): A catalog identifier. Defaults to None.
+        return_resp_obj (bool, optional): If True then return the response object. Defaults to False.
+
+    Returns:
+        requests.Response | None: The response object from the API call if return_resp_obj is True, otherwise None.
+
+    Examples:
+
+        Individually, from scratch:
+
+        >>> from fusion import Fusion
+        >>> fusion = Fusion()
+        >>> attribute0 = fusion.attribute(identifier="my_attribute_0", index=0)
+        >>> attribute.create(dataset="my_dataset", catalog="my_catalog")
+        >>> attribute1 = fusion.attribute(identifier="my_attribute_1", index=1)
+        >>> attribute.create(dataset="my_dataset", catalog="my_catalog")
+
+        Individually, from a dictionary:
+
+        >>> from fusion import Fusion
+        >>> fusion = Fusion()
+        >>> data = {
+        ...     "identifier": "my_attribute",
+        ...     "index": 0,
+        ...     "data_type": "String",
+        ...     "title": "My Attribute",
+        ...     "description": "My attribute description"
+        ...    }
+        >>> attribute = fusion.attribute(identifier="my_attribute", index=0).from_object(data)
+        >>> attribute.create(dataset="my_dataset", catalog="my_catalog")
+
+        Individually, from a pandas Series:
+
+        >>> from fusion import Fusion
+        >>> fusion = Fusion()
+        >>> import pandas as pd
+        >>> series = pd.Series({
+        ...     "identifier": "my_attribute",
+        ...     "index": 0,
+        ...     "data_type": "String",
+        ...     "title": "My Attribute",
+        ...     "description": "My attribute description"
+        ... })
+        >>> attribute = fusion.attribute(identifier="my_attribute", index=0).from_object(series)
+        >>> attribute.create(dataset="my_dataset", catalog="my_catalog")
+
+    """
+    client = self._use_client(client)
+    catalog = client._use_catalog(catalog)
+    data = self.to_dict()
+    url = f"{client.root_url}catalogs/{catalog}/datasets/{dataset}/attributes/{self.identifier}"
+    resp = client.session.put(url, json=data)
+    requests_raise_for_status(resp)
+    return resp if return_resp_obj else None
+
+
+
+ +
+ +
+ + +

+ delete(dataset, catalog=None, client=None, return_resp_obj=False) + +

+ + +
+ +

Delete an Attribute from a Fusion catalog.

+ + +

Parameters:

+ + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + +
NameTypeDescriptionDefault
dataset + str + +
+

Dataset identifier.

+
+
+ required +
client + Fusion + +
+

A Fusion client object. Defaults to the instance's _client.

+
+
+ None +
catalog + str + +
+

A catalog identifier. Defaults to None.

+
+
+ None +
return_resp_obj + bool + +
+

If True then return the response object. Defaults to False.

+
+
+ False +
+ + +

Returns:

+ + + + + + + + + + + + + +
TypeDescription
+ Response | None + +
+

requests.Response | None: The response object from the API call if return_resp_obj is True, otherwise None.

+
+
+
>>> from fusion import Fusion
+>>> fusion = Fusion()
+>>> fusion.attribute(identifier="my_attribute", index=0).delete(dataset="my_dataset", catalog="my_catalog")
+
+ +
+ Source code in py_src/fusion/attributes.py +
408
+409
+410
+411
+412
+413
+414
+415
+416
+417
+418
+419
+420
+421
+422
+423
+424
+425
+426
+427
+428
+429
+430
+431
+432
+433
+434
+435
+436
+437
+438
def delete(
+    self,
+    dataset: str,
+    catalog: str | None = None,
+    client: Fusion | None = None,
+    return_resp_obj: bool = False,
+) -> requests.Response | None:
+    """Delete an Attribute from a Fusion catalog.
+
+    Args:
+        dataset (str): Dataset identifier.
+        client (Fusion, optional): A Fusion client object. Defaults to the instance's _client.
+        catalog (str, optional): A catalog identifier. Defaults to None.
+        return_resp_obj (bool, optional): If True then return the response object. Defaults to False.
+
+    Returns:
+        requests.Response | None: The response object from the API call if return_resp_obj is True, otherwise None.
+
+    Examples:
+
+        >>> from fusion import Fusion
+        >>> fusion = Fusion()
+        >>> fusion.attribute(identifier="my_attribute", index=0).delete(dataset="my_dataset", catalog="my_catalog")
+
+    """
+    client = self._use_client(client)
+    catalog = client._use_catalog(catalog)
+    url = f"{client.root_url}catalogs/{catalog}/datasets/{dataset}/attributes/{self.identifier}"
+    resp = client.session.delete(url)
+    requests_raise_for_status(resp)
+    return resp if return_resp_obj else None
+
+
+
+ +
+ +
+ + +

+ from_object(attribute_source) + +

+ + +
+ +

Instatiate an Attribute from an Attribute object, dictionary or pandas Series.

+ + +

Parameters:

+ + + + + + + + + + + + + + + + + +
NameTypeDescriptionDefault
attribute_source + Attribute | dict[str, Any] | Series[Any] + +
+

Attribute metadata source.

+
+
+ required +
+ + +

Raises:

+ + + + + + + + + + + + + +
TypeDescription
+ TypeError + +
+

If the object provided is not an Attribute object, dictionary or pandas Series.

+
+
+ + +

Returns:

+ + + + + + + + + + + + + +
Name TypeDescription
Attribute + Attribute + +
+

Attribute object.

+
+
+
Instatiating a Attribute from a dictionary:
+
+>>> from fusion import Fusion
+>>> fusion = Fusion()
+>>> data = {
+...     "identifier": "my_attribute",
+...     "index": 0,
+...     "data_type": "String",
+...     "title": "My Attribute",
+...     "description": "My attribute description"
+... }
+>>> attribute = fusion.attribute(identifier="my_attribute", index=0).from_object(data)
+
+Instatiating a Attribute from a pandas Series:
+
+>>> from fusion import Fusion
+>>> fusion = Fusion()
+>>> import pandas as pd
+>>> series = pd.Series({
+...     "identifier": "my_attribute",
+...     "index": 0,
+...     "data_type": "String",
+...     "title": "My Attribute",
+...     "description": "My attribute description"
+... })
+>>> attribute = fusion.attribute(identifier="my_attribute", index=0).from_object(series)
+
+ +
+ Source code in py_src/fusion/attributes.py +
263
+264
+265
+266
+267
+268
+269
+270
+271
+272
+273
+274
+275
+276
+277
+278
+279
+280
+281
+282
+283
+284
+285
+286
+287
+288
+289
+290
+291
+292
+293
+294
+295
+296
+297
+298
+299
+300
+301
+302
+303
+304
+305
+306
+307
+308
+309
+310
+311
+312
+313
+314
+315
+316
+317
def from_object(
+    self,
+    attribute_source: Attribute | dict[str, Any] | pd.Series[Any],
+) -> Attribute:
+    """Instatiate an Attribute from an Attribute object, dictionary or pandas Series.
+
+    Args:
+        attribute_source (Attribute | dict[str, Any] | pd.Series[Any]): Attribute metadata source.
+
+    Raises:
+        TypeError: If the object provided is not an Attribute object, dictionary or pandas Series.
+
+    Returns:
+        Attribute: Attribute object.
+
+    Examples:
+
+        Instantiating an Attribute from a dictionary:
+
+        >>> from fusion import Fusion
+        >>> fusion = Fusion()
+        >>> data = {
+        ...     "identifier": "my_attribute",
+        ...     "index": 0,
+        ...     "data_type": "String",
+        ...     "title": "My Attribute",
+        ...     "description": "My attribute description"
+        ... }
+        >>> attribute = fusion.attribute(identifier="my_attribute", index=0).from_object(data)
+
+        Instantiating an Attribute from a pandas Series:
+
+        >>> from fusion import Fusion
+        >>> fusion = Fusion()
+        >>> import pandas as pd
+        >>> series = pd.Series({
+        ...     "identifier": "my_attribute",
+        ...     "index": 0,
+        ...     "data_type": "String",
+        ...     "title": "My Attribute",
+        ...     "description": "My attribute description"
+        ... })
+        >>> attribute = fusion.attribute(identifier="my_attribute", index=0).from_object(series)
+
+    """
+    if isinstance(attribute_source, Attribute):
+        attribute = attribute_source
+    elif isinstance(attribute_source, dict):
+        attribute = self._from_dict(attribute_source)
+    elif isinstance(attribute_source, pd.Series):
+        attribute = self._from_series(attribute_source)
+    else:
+        raise ValueError(f"Could not resolve the object provided: {attribute_source}")
+    attribute.client = self._client
+    return attribute
+
+
+
+ +
+ +
+ + +

+ set_lineage(attributes, catalog=None, client=None, return_resp_obj=False) + +

+ + +
+ +

Map an attribute to existing registered attributes in a Fusion catalog. Attributes from an output data flow + can be mapped to existing registered input data flow attributes. This supports the case in which the + generating application and receiving application store their attributes with different names.

+ + +

Parameters:

+ + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + +
NameTypeDescriptionDefault
attributes + str + +
+

List of Attribute objects to establish upstream lineage from.

+
+
+ required +
client + Fusion + +
+

A Fusion client object. Defaults to the instance's _client. +If instantiated from a Fusion object, then the client is set automatically.

+
+
+ None +
catalog + str + +
+

A catalog identifier. Defaults to None.

+
+
+ None +
return_resp_obj + bool + +
+

If True then return the response object. Defaults to False.

+
+
+ False +
+ + +

Returns:

+ + + + + + + + + + + + + +
TypeDescription
+ Response | None + +
+

requests.Response | None: The response object from the API call if return_resp_obj is True, otherwise None.

+
+
+
>>> from fusion import Fusion
+>>> fusion = Fusion()
+>>> my_attr1 = fusion.attribute(identifier="my_attribute1", index=0, application_id="12345")
+>>> my_attr2 = fusion.attribute(identifier="my_attribute2", index=0, application_id="12345")
+>>> my_attr3 = fusion.attribute(identifier="my_attribute3", index=0, application_id="12345")
+>>> attrs = [my_attr1, my_attr2]
+>>> my_attr3.set_lineage(attributes=attrs, catalog="my_catalog")
+
+ +
+ Source code in py_src/fusion/attributes.py +
440
+441
+442
+443
+444
+445
+446
+447
+448
+449
+450
+451
+452
+453
+454
+455
+456
+457
+458
+459
+460
+461
+462
+463
+464
+465
+466
+467
+468
+469
+470
+471
+472
+473
+474
+475
+476
+477
+478
+479
+480
+481
+482
+483
+484
+485
+486
+487
+488
+489
+490
+491
+492
+493
+494
+495
+496
+497
+498
+499
+500
+501
def set_lineage(
+    self,
+    attributes: list[Attribute],
+    catalog: str | None = None,
+    client: Fusion | None = None,
+    return_resp_obj: bool = False,
+) -> requests.Response | None:
+    """Map an attribute to existing registered attributes in a Fusion catalog. Attributes from an output data flow
+        can be mapped to existing registered input data flow attributes. This supports the case in which the
+        generating application and receiving application store their attributes with different names.
+
+    Args:
+        attributes (list[Attribute]): List of Attribute objects to establish upstream lineage from.
+        client (Fusion, optional): A Fusion client object. Defaults to the instance's _client.
+            If instantiated from a Fusion object, then the client is set automatically.
+        catalog (str, optional): A catalog identifier. Defaults to None.
+        return_resp_obj (bool, optional): If True then return the response object. Defaults to False.
+
+    Returns:
+        requests.Response | None: The response object from the API call if return_resp_obj is True, otherwise None.
+
+    Examples:
+
+        >>> from fusion import Fusion
+        >>> fusion = Fusion()
+        >>> my_attr1 = fusion.attribute(identifier="my_attribute1", index=0, application_id="12345")
+        >>> my_attr2 = fusion.attribute(identifier="my_attribute2", index=0, application_id="12345")
+        >>> my_attr3 = fusion.attribute(identifier="my_attribute3", index=0, application_id="12345")
+        >>> attrs = [my_attr1, my_attr2]
+        >>> my_attr3.set_lineage(attributes=attrs, catalog="my_catalog")
+
+    """
+    client = self._use_client(client)
+    catalog = client._use_catalog(catalog)
+
+    if self.application_id is None:
+        raise ValueError("The 'application_id' attribute is required for setting lineage.")
+    target_attributes = []
+    for attribute in attributes:
+        if attribute.application_id is None:
+            raise ValueError(f"The 'application_id' attribute is required for setting lineage.")
+        attr_dict = {
+                "catalog": catalog,
+                "attribute": attribute.identifier,
+                "applicationId": attribute.application_id
+            }
+        target_attributes.append(attr_dict)
+
+    url = f"{client.root_url}catalogs/{catalog}/attributes/lineage"
+    data = [
+        {
+            "source": {
+                "catalog": catalog,
+                "attribute": self.identifier,
+                "applicationId": self.application_id
+        },
+        "targets": target_attributes
+    }
+    ]
+    resp = client.session.post(url, json=data)
+    requests_raise_for_status(resp)
+    return resp if return_resp_obj else None
+
+
+
+ +
+ +
+ + +

+ to_dict() + +

+ + +
+ +

Convert object to dictionary.

+ + +

Returns:

+ + + + + + + + + + + + + +
TypeDescription
+ dict[str, Any] + +
+

dict[str, Any]: Attribute metadata as a dictionary.

+
+
+
>>> from fusion import Fusion
+>>> fusion = Fusion()
+>>> attribute = fusion.attribute(identifier="my_attribute", index=0)
+>>> attribute_dict = attribute.to_dict()
+
+ +
+ Source code in py_src/fusion/attributes.py +
319
+320
+321
+322
+323
+324
+325
+326
+327
+328
+329
+330
+331
+332
+333
+334
+335
+336
+337
+338
def to_dict(self: Attribute) -> dict[str, Any]:
+    """Convert object to dictionary.
+
+    Returns:
+        dict[str, Any]: Attribute metadata as a dictionary.
+
+    Examples:
+
+        >>> from fusion import Fusion
+        >>> fusion = Fusion()
+        >>> attribute = fusion.attribute(identifier="my_attribute", index=0)
+        >>> attribute_dict = attribute.to_dict()
+
+    """
+    result = {snake_to_camel(k): v for k, v in self.__dict__.items() if not k.startswith("_")}
+    result["unit"] = str(self.unit) if self.unit is not None else None
+    result["dataType"] = self.data_type.name
+    if "isKeyDataElement" in result:
+        result["isCriticalDataElement"] = result.pop("isKeyDataElement")
+    return result
+
+
+
+ +
+ + + +
+ +
+ +
+ +
+ + + +

+ Attributes + + + + dataclass + + +

+ + +
+ + +

Class representing a collection of Attribute instances for managing attribute metadata in a Fusion catalog.

+ + +

Attributes:

+ + + + + + + + + + + + + + + + + + + + +
NameTypeDescription
attributes + list[Attribute] + +
+

List of Attribute instances.

+
+
_client + Fusion | None + +
+

Fusion client object.

+
+
+ +
+ Source code in py_src/fusion/attributes.py +
504
+505
+506
+507
+508
+509
+510
+511
+512
+513
+514
+515
+516
+517
+518
+519
+520
+521
+522
+523
+524
+525
+526
+527
+528
+529
+530
+531
+532
+533
+534
+535
+536
+537
+538
+539
+540
+541
+542
+543
+544
+545
+546
+547
+548
+549
+550
+551
+552
+553
+554
+555
+556
+557
+558
+559
+560
+561
+562
+563
+564
+565
+566
+567
+568
+569
+570
+571
+572
+573
+574
+575
+576
+577
+578
+579
+580
+581
+582
+583
+584
+585
+586
+587
+588
+589
+590
+591
+592
+593
+594
+595
+596
+597
+598
+599
+600
+601
+602
+603
+604
+605
+606
+607
+608
+609
+610
+611
+612
+613
+614
+615
+616
+617
+618
+619
+620
+621
+622
+623
+624
+625
+626
+627
+628
+629
+630
+631
+632
+633
+634
+635
+636
+637
+638
+639
+640
+641
+642
+643
+644
+645
+646
+647
+648
+649
+650
+651
+652
+653
+654
+655
+656
+657
+658
+659
+660
+661
+662
+663
+664
+665
+666
+667
+668
+669
+670
+671
+672
+673
+674
+675
+676
+677
+678
+679
+680
+681
+682
+683
+684
+685
+686
+687
+688
+689
+690
+691
+692
+693
+694
+695
+696
+697
+698
+699
+700
+701
+702
+703
+704
+705
+706
+707
+708
+709
+710
+711
+712
+713
+714
+715
+716
+717
+718
+719
+720
+721
+722
+723
+724
+725
+726
+727
+728
+729
+730
+731
+732
+733
+734
+735
+736
+737
+738
+739
+740
+741
+742
+743
+744
+745
+746
+747
+748
+749
+750
+751
+752
+753
+754
+755
+756
+757
+758
+759
+760
+761
+762
+763
+764
+765
+766
+767
+768
+769
+770
+771
+772
+773
+774
+775
+776
+777
+778
+779
+780
+781
+782
+783
+784
+785
+786
+787
+788
+789
+790
+791
+792
+793
+794
+795
+796
+797
+798
+799
+800
+801
+802
+803
+804
+805
+806
+807
+808
+809
+810
+811
+812
+813
+814
+815
+816
+817
+818
+819
+820
+821
+822
+823
+824
+825
+826
+827
+828
+829
+830
+831
+832
+833
+834
+835
+836
+837
+838
+839
+840
+841
+842
+843
+844
+845
+846
+847
+848
+849
+850
+851
+852
+853
+854
+855
+856
+857
+858
+859
+860
+861
+862
+863
+864
+865
+866
+867
+868
+869
+870
+871
+872
+873
+874
+875
+876
+877
+878
+879
+880
+881
+882
+883
+884
+885
+886
+887
+888
+889
+890
+891
+892
+893
+894
+895
+896
+897
+898
+899
+900
+901
+902
+903
+904
+905
+906
+907
+908
+909
+910
+911
+912
+913
+914
+915
+916
+917
+918
+919
+920
+921
+922
+923
+924
+925
+926
+927
+928
+929
+930
+931
+932
+933
+934
+935
+936
+937
+938
+939
+940
+941
+942
+943
+944
+945
+946
+947
@dataclass
+class Attributes:
+    """Class representing a collection of Attribute instances for managing atrribute metadata in a Fusion catalog.
+
+    Attributes:
+        attributes (list[Attribute]): List of Attribute instances.
+        _client (Fusion | None): Fusion client object.
+
+    """
+
+    attributes: list[Attribute] = field(default_factory=list)
+
+    _client: Fusion | None = None
+
+    def __str__(self) -> str:
+        """String representation of the Attributes collection."""
+        return (
+            f"[\n" + ",\n ".join(f"{attr.__repr__()}" for attr in self.attributes) + "\n]" if self.attributes else "[]"
+        )
+
+    def __repr__(self) -> str:
+        """Object representation of the Attributes collection."""
+        return self.__str__()
+
+    @property
+    def client(self) -> Fusion | None:
+        """Return the client."""
+        return self._client
+
+    @client.setter
+    def client(self, client: Fusion | None) -> None:
+        """Set the client for the Dataset. Set automatically, if the Dataset is instantiated from a Fusion object.
+
+        Args:
+            client (Any): Fusion client object.
+
+        Examples:
+            >>> from fusion import Fusion
+            >>> fusion = Fusion()
+            >>> attributes = fusion.attributes()
+            >>> attributes.client = fusion
+
+        """
+        self._client = client
+
+    def _use_client(self, client: Fusion | None) -> Fusion:
+        """Determine client."""
+
+        res = self._client if client is None else client
+        if res is None:
+            raise ValueError("A Fusion client object is required.")
+        return res
+
+    def add_attribute(self, attribute: Attribute) -> None:
+        """Add an Attribute instance to the collection.
+
+        Args:
+            attribute (Attribute): Attribute instance to add.
+
+        Examples:
+
+            >>> from fusion import Fusion
+            >>> fusion = Fusion()
+            >>> attribute = fusion.attribute(identifier="my_attribute", index=0)
+            >>> attributes = fusion.attributes()
+            >>> attributes.add_attribute(attribute)
+
+        """
+        self.attributes.append(attribute)
+
+    def remove_attribute(self, identifier: str) -> bool:
+        """Remove an Attribute instance from the collection by identifier.
+
+        Args:
+            identifier (str): Identifier of the Attribute to remove.
+
+        Returns:
+            bool: True if the Attribute was removed, False otherwise.
+
+        Examples:
+
+            >>> from fusion import Fusion
+            >>> fusion = Fusion()
+            >>> attribute = fusion.attribute(identifier="my_attribute", index=0)
+            >>> attributes = fusion.attributes(attributes=[attribute])
+            >>> attributes.remove_attribute("my_attribute")
+
+        """
+        for attr in self.attributes:
+            if attr.identifier == identifier:
+                self.attributes.remove(attr)
+                return True
+        return False
+
+    def get_attribute(self, identifier: str) -> Attribute | None:
+        """Get an Attribute instance from the collection by identifier.
+
+        Args:
+            identifier (str): Identifier of the Attribute to retrieve.
+
+        Returns:
+            Attribute | None: The Attribute instance if found, None otherwise.
+
+        Examples:
+
+            >>> from fusion import Fusion
+            >>> fusion = Fusion()
+            >>> attribute = fusion.attribute(identifier="my_attribute", index=0)
+            >>> attributes = fusion.attributes(attributes=[attribute])
+            >>> retrieved_attribute = attributes.get_attribute("my_attribute")
+
+        """
+        for attr in self.attributes:
+            if attr.identifier == identifier:
+                return attr
+        return None
+
+    def to_dict(self) -> dict[str, list[dict[str, Any]]]:
+        """Convert the collection of Attribute instances to a list of dictionaries.
+
+        Returns:
+            dict[str, list[dict[str, Any]]]: Collection of Attribute instances as a dictionary.
+
+        Examples:
+
+            >>> from fusion import Fusion
+            >>> fusion = Fusion()
+            >>> attribute = fusion.attribute(identifier="my_attribute", index=0)
+            >>> attributes = fusion.attributes(attributes=[attribute])
+            >>> attributes_dict = attributes.to_dict()
+
+        """
+        dict_out = {"attributes": [attr.to_dict() for attr in self.attributes]}
+        return dict_out
+
+    @classmethod
+    def _from_dict_list(cls: type[Attributes], data: list[dict[str, Any]]) -> Attributes:
+        """Create an Attributes instance from a list of dictionaries.
+
+        Args:
+            data (list[dict[str, Any]]): List of dictionaries representing Attribute instances.
+
+        Returns:
+            Attributes: Attributes instance.
+
+        Examples:
+
+            >>> from fusion import Fusion
+            >>> fusion = Fusion()
+            >>> data = [
+            ...     {
+            ...         "identifier": "my_attribute",
+            ...         "index": 0,
+            ...         "data_type": "String",
+            ...         "title": "My Attribute",
+            ...         "description": "My attribute description"
+            ...     }
+            ... ]
+            >>> attributes = fusion.attributes()._from_dict_list(data)
+
+        """
+        attributes = [Attribute._from_dict(attr_data) for attr_data in data]
+        return Attributes(attributes=attributes)
+
+    @classmethod
+    def _from_dataframe(cls: type[Attributes], data: pd.DataFrame) -> Attributes:
+        """Create an Attributes instance from a pandas DataFrame.
+
+        Args:
+            data (pd.DataFrame): DataFrame representing Attribute instances.
+
+        Returns:
+            Attributes: Attributes instance.
+
+        Examples:
+
+            >>> from fusion import Fusion
+            >>> fusion = Fusion()
+            >>> import pandas as pd
+            >>> data = pd.DataFrame([
+            ...     {
+            ...         "identifier": "my_attribute",
+            ...         "index": 0,
+            ...         "data_type": "String",
+            ...         "title": "My Attribute",
+            ...         "description": "My attribute description"
+            ...     }
+            ... ])
+            >>> attributes = fusion.attributes()._from_dataframe(data)
+
+        """
+        data = data.replace(to_replace=np.nan, value=None)
+        data = data.reset_index() if "index" not in data.columns else data
+        attributes = [Attribute._from_series(series) for _, series in data.iterrows()]
+        return Attributes(attributes=attributes)
+
+    def from_object(
+        self,
+        attributes_source: list[Attribute] | list[dict[str, Any]] | pd.DataFrame,
+    ) -> Attributes:
+        """Instantiate an Attributes object from a list of Attribute objects, dictionaries or pandas DataFrame.
+
+        Args:
+            attributes_source (list[Attribute] | list[dict[str, Any]] | pd.DataFrame): Attributes metadata source.
+
+        Raises:
+            TypeError: If the object provided is not a list of Attribute objects, dictionaries or pandas DataFrame.
+
+        Returns:
+            Attributes: Attributes object.
+
+        Examples:
+
+            Instantiating Attributes from a list of dictionaries:
+
+            >>> from fusion import Fusion
+            >>> fusion = Fusion()
+            >>> data = [
+            ...     {
+            ...         "identifier": "my_attribute",
+            ...         "index": 0,
+            ...         "data_type": "String",
+            ...         "title": "My Attribute",
+            ...         "description": "My attribute description"
+            ...     }
+            ... ]
+            >>> attributes = fusion.attributes().from_object(data)
+
+            Instantiating Attributes from a pandas DataFrame:
+
+            >>> from fusion import Fusion
+            >>> fusion = Fusion()
+            >>> import pandas as pd
+            >>> data = pd.DataFrame([
+            ...     {
+            ...         "identifier": "my_attribute",
+            ...         "index": 0,
+            ...         "data_type": "String",
+            ...         "title": "My Attribute",
+            ...         "description": "My attribute description"
+            ...     }
+            ... ])
+            >>> attributes = fusion.attributes().from_object(data)
+
+        """
+        if isinstance(attributes_source, list):
+            if all(isinstance(attr, Attribute) for attr in attributes_source):
+                attributes = Attributes(cast(list[Attribute], attributes_source))
+            elif all(isinstance(attr, dict) for attr in attributes_source):
+                attributes = Attributes._from_dict_list(cast(list[dict[str, Any]], attributes_source))
+        elif isinstance(attributes_source, pd.DataFrame):
+            attributes = Attributes._from_dataframe(attributes_source)
+        else:
+            raise ValueError(f"Could not resolve the object provided: {attributes_source}")
+        attributes.client = self._client
+        return attributes
+
+    def to_dataframe(self) -> pd.DataFrame:
+        """Convert the collection of Attribute instances to a pandas DataFrame.
+
+        Returns:
+            pd.DataFrame: DataFrame representing the collection of Attribute instances.
+
+        Examples:
+
+            >>> from fusion import Fusion
+            >>> fusion = Fusion()
+            >>> import pandas as pd
+            >>> attribute = fusion.attribute(identifier="my_attribute", index=0)
+            >>> attributes = fusion.attributes(attributes=[attribute])
+            >>> attributes_df = attributes.to_dataframe()
+
+        """
+        if len(self.attributes) == 0:
+            self.attributes = [Attribute(identifier="example_attribute", index=0)]
+        data = [attr.to_dict() for attr in self.attributes]
+        return pd.DataFrame(data)
+
+    def from_catalog(self, dataset: str, catalog: str | None = None, client: Fusion | None = None) -> Attributes:
+        """Instatiate an Attributes object from a dataset's attributes in a Fusion catalog.
+
+        Args:
+            dataset (str): The dataset identifier.
+            catalog (str | None, optional): The catalog identifier. Defaults to None.
+            client (Fusion | None, optional): Fusion session. Defaults to None.
+                If instantiated from a Fusion object, then the client is set automatically.
+
+        Returns:
+            Attributes: An instance of the Attributes class with the attributes from the catalog.
+
+        Examples:
+            >>> from fusion import Fusion
+            >>> fusion = Fusion()
+            >>> attributes = fusion.attributes().from_catalog(dataset="my_dataset", catalog="my_catalog")
+
+        """
+        client = self._use_client(client)
+        catalog = client._use_catalog(catalog)
+        url = f"{client.root_url}catalogs/{catalog}/datasets/{dataset}/attributes"
+        response = client.session.get(url)
+        requests_raise_for_status(response)
+        list_attributes = response.json()["resources"]
+        list_attributes = sorted(list_attributes, key=lambda x: x["index"])
+
+        self.attributes = [Attribute._from_dict(attr_data) for attr_data in list_attributes]
+        return self
+
+    def create(
+        self,
+        dataset: str | None = None,
+        catalog: str | None = None,
+        client: Fusion | None = None,
+        return_resp_obj: bool = False,
+    ) -> requests.Response | None:
+        """Upload the Attributes to a dataset in a Fusion catalog. If no dataset is provided,
+            attributes are registered to the catalog.
+
+        Args:
+            dataset (str): Dataset identifier.
+            client (Fusion, optional): A Fusion client object. Defaults to the instance's _client.
+                If instantiated from a Fusion object, then the client is set automatically.
+            catalog (str, optional): A catalog identifier. Defaults to None.
+            return_resp_obj (bool, optional): If True then return the response object. Defaults to False.
+
+        Returns:
+            requests.Response | None: The response object from the API call if return_resp_obj is True, otherwise None.
+
+        Examples:
+
+            From scratch:
+
+            >>> from fusion import Fusion
+            >>> fusion = Fusion()
+            >>> attribute = fusion.attribute(identifier="my_attribute", index=0)
+            >>> attributes = fusion.attributes(attributes=[attribute])
+            >>> attributes.create(dataset="my_dataset", catalog="my_catalog")
+
+            From a list of dictionaries:
+
+            >>> from fusion import Fusion
+            >>> fusion = Fusion()
+            >>> data = [
+            ...     {
+            ...         "identifier": "my_attribute",
+            ...         "index": 0,
+            ...         "data_type": "String",
+            ...         "title": "My Attribute",
+            ...         "description": "My attribute description"
+            ...     }
+            ... ]
+            >>> attributes = fusion.attributes().from_object(data)
+            >>> attributes.create(dataset="my_dataset", catalog="my_catalog")
+
+            From a pandas DataFrame:
+
+            >>> from fusion import Fusion
+            >>> fusion = Fusion()
+            >>> import pandas as pd
+            >>> data = pd.DataFrame([
+            ...     {
+            ...         "identifier": "my_attribute",
+            ...         "index": 0,
+            ...         "data_type": "String",
+            ...         "title": "My Attribute",
+            ...         "description": "My attribute description"
+            ...     }
+            ... ])
+            >>> attributes = fusion.attributes().from_object(data)
+            >>> attributes.create(dataset="my_dataset", catalog="my_catalog")
+
+            From existing dataset's attributes in a Fusion catalog:
+
+            >>> from fusion import Fusion
+            >>> fusion = Fusion()
+            >>> attributes = fusion.attributes().from_catalog(dataset="my_dataset", catalog="my_catalog")
+            >>> attributes.create(dataset="my_new_dataset", catalog="my_catalog")
+
+            Register attributes to a catalog:
+
+            >>> from fusion import Fusion
+            >>> fusion = Fusion()
+            >>> attribute = fusion.attribute(identifier="my_attribute", index=0, application_id="123", publisher="JPM")
+            >>> attributes = fusion.attributes(attributes=[attribute])
+            >>> attributes.create(catalog="my_catalog")
+
+        """
+        client = self._use_client(client)
+        catalog = client._use_catalog(catalog)
+        data = self.to_dict()
+        if dataset:
+            url = f"{client.root_url}catalogs/{catalog}/datasets/{dataset}/attributes"
+            resp = client.session.put(url, json=data)
+            requests_raise_for_status(resp)
+            return resp if return_resp_obj else None
+        else:
+            for attr in self.attributes:
+                if attr.publisher is None:
+                    raise ValueError("The 'publisher' attribute is required for catalog attributes.")
+                if attr.application_id is None:
+                    raise ValueError("The 'application_id' attribute is required for catalog attributes.")
+            url = f"{client.root_url}catalogs/{catalog}/attributes"
+            data_ = data.get("attributes", None)
+            resp = client.session.post(url, json=data_)
+            requests_raise_for_status(resp)
+            return resp if return_resp_obj else None
+
+    def delete(
+        self,
+        dataset: str,
+        catalog: str | None = None,
+        client: Fusion | None = None,
+        return_resp_obj: bool = False,
+    ) -> list[requests.Response] | None:
+        """Delete the Attributes from a Fusion catalog.
+
+        Args:
+            dataset (str): Dataset identifier.
+            client (Fusion, optional): A Fusion client object. Defaults to the instance's _client.
+            catalog (str, optional): A catalog identifier. Defaults to None.
+            return_resp_obj (bool, optional): If True then return the response object. Defaults to False.
+
+        Returns:
+            list[requests.Response] | None: List of response objects from the API calls if return_resp_obj is True,
+                otherwise None.
+
+        Examples:
+
+            >>> from fusion import Fusion
+            >>> fusion = Fusion()
+            >>> attributes = fusion.attributes().from_catalog(dataset="my_dataset", catalog="my_catalog")
+            >>> attributes.delete(dataset="my_dataset", catalog="my_catalog")
+
+        """
+        client = self._use_client(client)
+        catalog = client._use_catalog(catalog)
+        responses = []
+        for attr in self.attributes:
+            resp = client.session.delete(
+                f"{client.root_url}catalogs/{catalog}/datasets/{dataset}/attributes/{attr.identifier}"
+            )
+            requests_raise_for_status(resp)
+            responses.append(resp)
+
+        return responses if return_resp_obj else None
+
+
+ + + +
+ + + + + + + +
+ + + +

+ client: Fusion | None + + + property + writable + + +

+ + +
+ +

Return the client.

+
+ +
+ + + +
+ + +

+ __repr__() + +

+ + +
+ +

Object representation of the Attributes collection.

+ +
+ Source code in py_src/fusion/attributes.py +
524
+525
+526
def __repr__(self) -> str:
+    """Object representation of the Attributes collection."""
+    return self.__str__()
+
+
+
+ +
+ +
+ + +

+ __str__() + +

+ + +
+ +

String representation of the Attributes collection.

+ +
+ Source code in py_src/fusion/attributes.py +
518
+519
+520
+521
+522
def __str__(self) -> str:
+    """String representation of the Attributes collection."""
+    return (
+        f"[\n" + ",\n ".join(f"{attr.__repr__()}" for attr in self.attributes) + "\n]" if self.attributes else "[]"
+    )
+
+
+
+ +
+ +
+ + +

+ add_attribute(attribute) + +

+ + +
+ +

Add an Attribute instance to the collection.

+ + +

Parameters:

+ + + + + + + + + + + + + + + + + +
NameTypeDescriptionDefault
attribute + Attribute + +
+

Attribute instance to add.

+
+
+ required +
+
>>> from fusion import Fusion
+>>> fusion = Fusion()
+>>> attribute = fusion.attribute(identifier="my_attribute", index=0)
+>>> attributes = fusion.attributes()
+>>> attributes.add_attribute(attribute)
+
+ +
+ Source code in py_src/fusion/attributes.py +
557
+558
+559
+560
+561
+562
+563
+564
+565
+566
+567
+568
+569
+570
+571
+572
def add_attribute(self, attribute: Attribute) -> None:
+    """Add an Attribute instance to the collection.
+
+    Args:
+        attribute (Attribute): Attribute instance to add.
+
+    Examples:
+
+        >>> from fusion import Fusion
+        >>> fusion = Fusion()
+        >>> attribute = fusion.attribute(identifier="my_attribute", index=0)
+        >>> attributes = fusion.attributes()
+        >>> attributes.add_attribute(attribute)
+
+    """
+    self.attributes.append(attribute)
+
+
+
+ +
+ +
+ + +

+ create(dataset=None, catalog=None, client=None, return_resp_obj=False) + +

+ + +
+ +

Upload the Attributes to a dataset in a Fusion catalog. If no dataset is provided, + attributes are registered to the catalog.

+ + +

Parameters:

+ + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + +
NameTypeDescriptionDefault
dataset + str + +
+

Dataset identifier.

+
+
+ None +
client + Fusion + +
+

A Fusion client object. Defaults to the instance's _client. +If instantiated from a Fusion object, then the client is set automatically.

+
+
+ None +
catalog + str + +
+

A catalog identifier. Defaults to None.

+
+
+ None +
return_resp_obj + bool + +
+

If True then return the response object. Defaults to False.

+
+
+ False +
+ + +

Returns:

+ + + + + + + + + + + + + +
TypeDescription
+ Response | None + +
+

requests.Response | None: The response object from the API call if return_resp_obj is True, otherwise None.

+
+
+
From scratch:
+
+>>> from fusion import Fusion
+>>> fusion = Fusion()
+>>> attribute = fusion.attribute(identifier="my_attribute", index=0)
+>>> attributes = fusion.attributes(attributes=[attribute])
+>>> attributes.create(dataset="my_dataset", catalog="my_catalog")
+
+From a list of dictionaries:
+
+>>> from fusion import Fusion
+>>> fusion = Fusion()
+>>> data = [
+...     {
+...         "identifier": "my_attribute",
+...         "index": 0,
+...         "data_type": "String",
+...         "title": "My Attribute",
+...         "description": "My attribute description"
+...     }
+... ]
+>>> attributes = fusion.attributes().from_dict_list(data)
+>>> attributes.create(dataset="my_dataset", catalog="my_catalog")
+
+From a pandas DataFrame:
+
+>>> from fusion import Fusion
+>>> fusion = Fusion()
+>>> import pandas as pd
+>>> data = pd.DataFrame([
+...     {
+...         "identifier": "my_attribute",
+...         "index": 0,
+...         "data_type": "String",
+...         "title": "My Attribute",
+...         "description": "My attribute description"
+...     }
+... ])
+>>> attributes = fusion.attributes().from_dataframe(data)
+>>> attributes.create(dataset="my_dataset", catalog="my_catalog")
+
+From existing dataset's attributes in a Fusion catalog:
+
+>>> from fusion import Fusion
+>>> fusion = Fusion()
+>>> attributes = fusion.attributes().from_catalog(dataset="my_dataset", catalog="my_catalog")
+>>> attributes.create(dataset="my_new_dataset", catalog="my_catalog")
+
+Register attributes to a catalog:
+
+>>> from fusion import Fusion
+>>> fusion = Fusion()
+>>> attribute = fusion.attribute(identifier="my_attribute", index=0, application_id="123", publisher="JPM")
+>>> attributes = fusion.attributes(attributes=[attribute])
+>>> attributes.create(catalog="my_catalog")
+
+ +
+ Source code in py_src/fusion/attributes.py +
811
+812
+813
+814
+815
+816
+817
+818
+819
+820
+821
+822
+823
+824
+825
+826
+827
+828
+829
+830
+831
+832
+833
+834
+835
+836
+837
+838
+839
+840
+841
+842
+843
+844
+845
+846
+847
+848
+849
+850
+851
+852
+853
+854
+855
+856
+857
+858
+859
+860
+861
+862
+863
+864
+865
+866
+867
+868
+869
+870
+871
+872
+873
+874
+875
+876
+877
+878
+879
+880
+881
+882
+883
+884
+885
+886
+887
+888
+889
+890
+891
+892
+893
+894
+895
+896
+897
+898
+899
+900
+901
+902
+903
+904
+905
+906
+907
+908
def create(
+    self,
+    dataset: str | None = None,
+    catalog: str | None = None,
+    client: Fusion | None = None,
+    return_resp_obj: bool = False,
+) -> requests.Response | None:
+    """Upload the Attributes to a dataset in a Fusion catalog. If no dataset is provided,
+        attributes are registered to the catalog.
+
+    Args:
+        dataset (str): Dataset identifier.
+        client (Fusion, optional): A Fusion client object. Defaults to the instance's _client.
+            If instantiated from a Fusion object, then the client is set automatically.
+        catalog (str, optional): A catalog identifier. Defaults to None.
+        return_resp_obj (bool, optional): If True then return the response object. Defaults to False.
+
+    Returns:
+        requests.Response | None: The response object from the API call if return_resp_obj is True, otherwise None.
+
+    Examples:
+
+        From scratch:
+
+        >>> from fusion import Fusion
+        >>> fusion = Fusion()
+        >>> attribute = fusion.attribute(identifier="my_attribute", index=0)
+        >>> attributes = fusion.attributes(attributes=[attribute])
+        >>> attributes.create(dataset="my_dataset", catalog="my_catalog")
+
+        From a list of dictionaries:
+
+        >>> from fusion import Fusion
+        >>> fusion = Fusion()
+        >>> data = [
+        ...     {
+        ...         "identifier": "my_attribute",
+        ...         "index": 0,
+        ...         "data_type": "String",
+        ...         "title": "My Attribute",
+        ...         "description": "My attribute description"
+        ...     }
+        ... ]
+        >>> attributes = fusion.attributes().from_dict_list(data)
+        >>> attributes.create(dataset="my_dataset", catalog="my_catalog")
+
+        From a pandas DataFrame:
+
+        >>> from fusion import Fusion
+        >>> fusion = Fusion()
+        >>> import pandas as pd
+        >>> data = pd.DataFrame([
+        ...     {
+        ...         "identifier": "my_attribute",
+        ...         "index": 0,
+        ...         "data_type": "String",
+        ...         "title": "My Attribute",
+        ...         "description": "My attribute description"
+        ...     }
+        ... ])
+        >>> attributes = fusion.attributes().from_dataframe(data)
+        >>> attributes.create(dataset="my_dataset", catalog="my_catalog")
+
+        From existing dataset's attributes in a Fusion catalog:
+
+        >>> from fusion import Fusion
+        >>> fusion = Fusion()
+        >>> attributes = fusion.attributes().from_catalog(dataset="my_dataset", catalog="my_catalog")
+        >>> attributes.create(dataset="my_new_dataset", catalog="my_catalog")
+
+        Register attributes to a catalog:
+
+        >>> from fusion import Fusion
+        >>> fusion = Fusion()
+        >>> attribute = fusion.attribute(identifier="my_attribute", index=0, application_id="123", publisher="JPM")
+        >>> attributes = fusion.attributes(attributes=[attribute])
+        >>> attributes.create(catalog="my_catalog")
+
+    """
+    client = self._use_client(client)
+    catalog = client._use_catalog(catalog)
+    data = self.to_dict()
+    if dataset:
+        url = f"{client.root_url}catalogs/{catalog}/datasets/{dataset}/attributes"
+        resp = client.session.put(url, json=data)
+        requests_raise_for_status(resp)
+        return resp if return_resp_obj else None
+    else:
+        for attr in self.attributes:
+            if attr.publisher is None:
+                raise ValueError("The 'publisher' attribute is required for catalog attributes.")
+            if attr.application_id is None:
+                raise ValueError("The 'application_id' attribute is required for catalog attributes.")
+        url = f"{client.root_url}catalogs/{catalog}/attributes"
+        data_ = data.get("attributes", None)
+        resp = client.session.post(url, json=data_)
+        requests_raise_for_status(resp)
+        return resp if return_resp_obj else None
+
+
+
+ +
+ +
+ + +

+ delete(dataset, catalog=None, client=None, return_resp_obj=False) + +

+ + +
+ +

Delete the Attributes from a Fusion catalog.

+ + +

Parameters:

+ + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + +
NameTypeDescriptionDefault
dataset + str + +
+

Dataset identifier.

+
+
+ required +
client + Fusion + +
+

A Fusion client object. Defaults to the instance's _client.

+
+
+ None +
catalog + str + +
+

A catalog identifier. Defaults to None.

+
+
+ None +
return_resp_obj + bool + +
+

If True then return the response object. Defaults to False.

+
+
+ False +
+ + +

Returns:

+ + + + + + + + + + + + + +
TypeDescription
+ list[Response] | None + +
+

list[requests.Response] | None: List of response objects from the API calls if return_resp_obj is True, +otherwise None.

+
+
+
>>> from fusion import Fusion
+>>> fusion = Fusion()
+>>> attributes = fusion.attributes().from_catalog(dataset="my_dataset", catalog="my_catalog")
+>>> attributes.delete(dataset="my_dataset", catalog="my_catalog")
+
+ +
+ Source code in py_src/fusion/attributes.py +
910
+911
+912
+913
+914
+915
+916
+917
+918
+919
+920
+921
+922
+923
+924
+925
+926
+927
+928
+929
+930
+931
+932
+933
+934
+935
+936
+937
+938
+939
+940
+941
+942
+943
+944
+945
+946
+947
def delete(
+    self,
+    dataset: str,
+    catalog: str | None = None,
+    client: Fusion | None = None,
+    return_resp_obj: bool = False,
+) -> list[requests.Response] | None:
+    """Delete the Attributes from a Fusion catalog.
+
+    Args:
+        dataset (str): Dataset identifier.
+        client (Fusion, optional): A Fusion client object. Defaults to the instance's _client.
+        catalog (str, optional): A catalog identifier. Defaults to None.
+        return_resp_obj (bool, optional): If True then return the response object. Defaults to False.
+
+    Returns:
+        list[requests.Response] | None: List of response objects from the API calls if return_resp_obj is True,
+            otherwise None.
+
+    Examples:
+
+        >>> from fusion import Fusion
+        >>> fusion = Fusion()
+        >>> attributes = fusion.attributes().from_catalog(dataset="my_dataset", catalog="my_catalog")
+        >>> attributes.delete(dataset="my_dataset", catalog="my_catalog")
+
+    """
+    client = self._use_client(client)
+    catalog = client._use_catalog(catalog)
+    responses = []
+    for attr in self.attributes:
+        resp = client.session.delete(
+            f"{client.root_url}catalogs/{catalog}/datasets/{dataset}/attributes/{attr.identifier}"
+        )
+        requests_raise_for_status(resp)
+        responses.append(resp)
+
+    return responses if return_resp_obj else None
+
+
+
+ +
+ +
+ + +

+ from_catalog(dataset, catalog=None, client=None) + +

+ + +
+ +

Instatiate an Attributes object from a dataset's attributes in a Fusion catalog.

+ + +

Parameters:

+ + + + + + + + + + + + + + + + + + + + + + + + + + + + + +
NameTypeDescriptionDefault
dataset + str + +
+

The dataset identifier.

+
+
+ required +
catalog + str | None + +
+

The catalog identifier. Defaults to None.

+
+
+ None +
client + Fusion | None + +
+

Fusion session. Defaults to None. +If instantiated from a Fusion object, then the client is set automatically.

+
+
+ None +
+ + +

Returns:

+ + + + + + + + + + + + + +
Name TypeDescription
Attributes + Attributes + +
+

An instance of the Attributes class with the attributes from the catalog.

+
+
+ + +

Examples:

+
>>> from fusion import Fusion
+>>> fusion = Fusion()
+>>> attributes = fusion.attributes().from_catalog(dataset="my_dataset", catalog="my_catalog")
+
+ +
+ Source code in py_src/fusion/attributes.py +
782
+783
+784
+785
+786
+787
+788
+789
+790
+791
+792
+793
+794
+795
+796
+797
+798
+799
+800
+801
+802
+803
+804
+805
+806
+807
+808
+809
def from_catalog(self, dataset: str, catalog: str | None = None, client: Fusion | None = None) -> Attributes:
+    """Instatiate an Attributes object from a dataset's attributes in a Fusion catalog.
+
+    Args:
+        dataset (str): The dataset identifier.
+        catalog (str | None, optional): The catalog identifier. Defaults to None.
+        client (Fusion | None, optional): Fusion session. Defaults to None.
+            If instantiated from a Fusion object, then the client is set automatically.
+
+    Returns:
+        Attributes: An instance of the Attributes class with the attributes from the catalog.
+
+    Examples:
+        >>> from fusion import Fusion
+        >>> fusion = Fusion()
+        >>> attributes = fusion.attributes().from_catalog(dataset="my_dataset", catalog="my_catalog")
+
+    """
+    client = self._use_client(client)
+    catalog = client._use_catalog(catalog)
+    url = f"{client.root_url}catalogs/{catalog}/datasets/{dataset}/attributes"
+    response = client.session.get(url)
+    requests_raise_for_status(response)
+    list_attributes = response.json()["resources"]
+    list_attributes = sorted(list_attributes, key=lambda x: x["index"])
+
+    self.attributes = [Attribute._from_dict(attr_data) for attr_data in list_attributes]
+    return self
+
+
+
+ +
+ +
+ + +

+ from_object(attributes_source) + +

+ + +
+ +

Instantiate an Attributes object from a list of Attribute objects, dictionaries or pandas DataFrame.

+ + +

Parameters:

+ + + + + + + + + + + + + + + + + +
NameTypeDescriptionDefault
attributes_source + list[Attribute] | list[dict[str, Any]] | DataFrame + +
+

Attributes metadata source.

+
+
+ required +
+ + +

Raises:

+ + + + + + + + + + + + + +
TypeDescription
+ TypeError + +
+

If the object provided is not a list of Attribute objects, dictionaries or pandas DataFrame.

+
+
+ + +

Returns:

+ + + + + + + + + + + + + +
Name TypeDescription
Attributes + Attributes + +
+

Attributes object.

+
+
+
Instatiating Attributes from a list of dictionaries:
+
+>>> from fusion import Fusion
+>>> fusion = Fusion()
+>>> data = [
+...     {
+...         "identifier": "my_attribute",
+...         "index": 0,
+...         "data_type": "String",
+...         "title": "My Attribute",
+...         "description": "My attribute description"
+...     }
+... ]
+>>> attributes = fusion.attributes().from_object(data)
+
+Instatiating Attributes from a pandas DataFrame:
+
+>>> from fusion import Fusion
+>>> fusion = Fusion()
+>>> import pandas as pd
+>>> data = pd.DataFrame([
+...     {
+...         "identifier": "my_attribute",
+...         "index": 0,
+...         "data_type": "String",
+...         "title": "My Attribute",
+...         "description": "My attribute description"
+...     }
+... ])
+>>> attributes = fusion.attributes().from_object(data)
+
+ +
+ Source code in py_src/fusion/attributes.py +
700
+701
+702
+703
+704
+705
+706
+707
+708
+709
+710
+711
+712
+713
+714
+715
+716
+717
+718
+719
+720
+721
+722
+723
+724
+725
+726
+727
+728
+729
+730
+731
+732
+733
+734
+735
+736
+737
+738
+739
+740
+741
+742
+743
+744
+745
+746
+747
+748
+749
+750
+751
+752
+753
+754
+755
+756
+757
+758
+759
def from_object(
+    self,
+    attributes_source: list[Attribute] | list[dict[str, Any]] | pd.DataFrame,
+) -> Attributes:
+    """Instantiate an Attributes object from a list of Attribute objects, dictionaries or pandas DataFrame.
+
+    Args:
+        attributes_source (list[Attribute] | list[dict[str, Any]] | pd.DataFrame): Attributes metadata source.
+
+    Raises:
+        TypeError: If the object provided is not a list of Attribute objects, dictionaries or pandas DataFrame.
+
+    Returns:
+        Attributes: Attributes object.
+
+    Examples:
+
+        Instatiating Attributes from a list of dictionaries:
+
+        >>> from fusion import Fusion
+        >>> fusion = Fusion()
+        >>> data = [
+        ...     {
+        ...         "identifier": "my_attribute",
+        ...         "index": 0,
+        ...         "data_type": "String",
+        ...         "title": "My Attribute",
+        ...         "description": "My attribute description"
+        ...     }
+        ... ]
+        >>> attributes = fusion.attributes().from_object(data)
+
+        Instatiating Attributes from a pandas DataFrame:
+
+        >>> from fusion import Fusion
+        >>> fusion = Fusion()
+        >>> import pandas as pd
+        >>> data = pd.DataFrame([
+        ...     {
+        ...         "identifier": "my_attribute",
+        ...         "index": 0,
+        ...         "data_type": "String",
+        ...         "title": "My Attribute",
+        ...         "description": "My attribute description"
+        ...     }
+        ... ])
+        >>> attributes = fusion.attributes().from_object(data)
+
+    """
+    if isinstance(attributes_source, list):
+        if all(isinstance(attr, Attribute) for attr in attributes_source):
+            attributes = Attributes(cast(list[Attribute], attributes_source))
+        elif all(isinstance(attr, dict) for attr in attributes_source):
+            attributes = Attributes._from_dict_list(cast(list[dict[str, Any]], attributes_source))
+    elif isinstance(attributes_source, pd.DataFrame):
+        attributes = Attributes._from_dataframe(attributes_source)
+    else:
+        raise ValueError(f"Could not resolve the object provided: {attributes_source}")
+    attributes.client = self._client
+    return attributes
+
+
+
+ +
+ +
+ + +

+ get_attribute(identifier) + +

+ + +
+ +

Get an Attribute instance from the collection by identifier.

+ + +

Parameters:

+ + + + + + + + + + + + + + + + + +
NameTypeDescriptionDefault
identifier + str + +
+

Identifier of the Attribute to retrieve.

+
+
+ required +
+ + +

Returns:

+ + + + + + + + + + + + + +
TypeDescription
+ Attribute | None + +
+

Attribute | None: The Attribute instance if found, None otherwise.

+
+
+
>>> from fusion import Fusion
+>>> fusion = Fusion()
+>>> attribute = fusion.attribute(identifier="my_attribute", index=0)
+>>> attributes =fusion.attributes(attributes=[attribute])
+>>> retrieved_attribute = attributes.get_attribute("my_attribute")
+
+ +
+ Source code in py_src/fusion/attributes.py +
598
+599
+600
+601
+602
+603
+604
+605
+606
+607
+608
+609
+610
+611
+612
+613
+614
+615
+616
+617
+618
+619
def get_attribute(self, identifier: str) -> Attribute | None:
+    """Get an Attribute instance from the collection by identifier.
+
+    Args:
+        identifier (str): Identifier of the Attribute to retrieve.
+
+    Returns:
+        Attribute | None: The Attribute instance if found, None otherwise.
+
+    Examples:
+
+        >>> from fusion import Fusion
+        >>> fusion = Fusion()
+        >>> attribute = fusion.attribute(identifier="my_attribute", index=0)
+        >>> attributes =fusion.attributes(attributes=[attribute])
+        >>> retrieved_attribute = attributes.get_attribute("my_attribute")
+
+    """
+    for attr in self.attributes:
+        if attr.identifier == identifier:
+            return attr
+    return None
+
+
+
+ +
+ +
+ + +

+ remove_attribute(identifier) + +

+ + +
+ +

Remove an Attribute instance from the collection by identifier.

+ + +

Parameters:

+ + + + + + + + + + + + + + + + + +
NameTypeDescriptionDefault
identifier + str + +
+

Identifier of the Attribute to remove.

+
+
+ required +
+ + +

Returns:

+ + + + + + + + + + + + + +
Name TypeDescription
bool + bool + +
+

True if the Attribute was removed, False otherwise.

+
+
+
>>> from fusion import Fusion
+>>> fusion = Fusion()
+>>> attribute = fusion.attribute(identifier="my_attribute", index=0)
+>>> attributes = fusion.attributes(attributes=[attribute])
+>>> attributes.remove_attribute("my_attribute")
+
+ +
+ Source code in py_src/fusion/attributes.py +
574
+575
+576
+577
+578
+579
+580
+581
+582
+583
+584
+585
+586
+587
+588
+589
+590
+591
+592
+593
+594
+595
+596
def remove_attribute(self, identifier: str) -> bool:
+    """Remove an Attribute instance from the collection by identifier.
+
+    Args:
+        identifier (str): Identifier of the Attribute to remove.
+
+    Returns:
+        bool: True if the Attribute was removed, False otherwise.
+
+    Examples:
+
+        >>> from fusion import Fusion
+        >>> fusion = Fusion()
+        >>> attribute = fusion.attribute(identifier="my_attribute", index=0)
+        >>> attributes = fusion.attributes(attributes=[attribute])
+        >>> attributes.remove_attribute("my_attribute")
+
+    """
+    for attr in self.attributes:
+        if attr.identifier == identifier:
+            self.attributes.remove(attr)
+            return True
+    return False
+
+
+
+ +
+ +
+ + +

+ to_dataframe() + +

+ + +
+ +

Convert the collection of Attribute instances to a pandas DataFrame.

+ + +

Returns:

+ + + + + + + + + + + + + +
TypeDescription
+ DataFrame + +
+

pd.DataFrame: DataFrame representing the collection of Attribute instances.

+
+
+
>>> from fusion import Fusion
+>>> fusion = Fusion()
+>>> import pandas as pd
+>>> attribute = fusion.attribute(identifier="my_attribute", index=0)
+>>> attributes = fusion.attributes(attributes=[attribute])
+>>> attributes_df = attributes.to_dataframe()
+
+ +
+ Source code in py_src/fusion/attributes.py +
761
+762
+763
+764
+765
+766
+767
+768
+769
+770
+771
+772
+773
+774
+775
+776
+777
+778
+779
+780
def to_dataframe(self) -> pd.DataFrame:
+    """Convert the collection of Attribute instances to a pandas DataFrame.
+
+    Returns:
+        pd.DataFrame: DataFrame representing the collection of Attribute instances.
+
+    Examples:
+
+        >>> from fusion import Fusion
+        >>> fusion = Fusion()
+        >>> import pandas as pd
+        >>> attribute = fusion.attribute(identifier="my_attribute", index=0)
+        >>> attributes = fusion.attributes(attributes=[attribute])
+        >>> attributes_df = attributes.to_dataframe()
+
+    """
+    if len(self.attributes) == 0:
+        self.attributes = [Attribute(identifier="example_attribute", index=0)]
+    data = [attr.to_dict() for attr in self.attributes]
+    return pd.DataFrame(data)
+
+
+
+ +
+ +
+ + +

+ to_dict() + +

+ + +
+ +

Convert the collection of Attribute instances to a list of dictionaries.

+ + +

Returns:

+ + + + + + + + + + + + + +
TypeDescription
+ dict[str, list[dict[str, Any]]] + +
+

dict[str, list[dict[str, Any]]]: Collection of Attribute instances as a dictionary.

+
+
+
>>> from fusion import Fusion
+>>> fusion = Fusion()
+>>> attribute = fusion.attribute(identifier="my_attribute", index=0)
+>>> attributes = fusion.attributes(attributes=[attribute])
+>>> attributes_dict = attributes.to_dict()
+
+ +
+ Source code in py_src/fusion/attributes.py +
621
+622
+623
+624
+625
+626
+627
+628
+629
+630
+631
+632
+633
+634
+635
+636
+637
def to_dict(self) -> dict[str, list[dict[str, Any]]]:
+    """Convert the collection of Attribute instances to a list of dictionaries.
+
+    Returns:
+        dict[str, list[dict[str, Any]]]: Collection of Attribute instances as a dictionary.
+
+    Examples:
+
+        >>> from fusion import Fusion
+        >>> fusion = Fusion()
+        >>> attribute = fusion.attribute(identifier="my_attribute", index=0)
+        >>> attributes = fusion.attributes(attributes=[attribute])
+        >>> attributes_dict = attributes.to_dict()
+
+    """
+    dict_out = {"attributes": [attr.to_dict() for attr in self.attributes]}
+    return dict_out
+
+
+
+ +
+ + + +
+ +
+ +
+ + + + +
+ +
+ +
+ + + + + + + + + + + + + +
+
+ + + +
+ +
+ + + +
+
+
+
+ + + + + + + + + + \ No newline at end of file diff --git a/docs/2.0.6/assets/_mkdocstrings.css b/docs/2.0.6/assets/_mkdocstrings.css new file mode 100644 index 0000000..85449ec --- /dev/null +++ b/docs/2.0.6/assets/_mkdocstrings.css @@ -0,0 +1,119 @@ + +/* Avoid breaking parameter names, etc. in table cells. */ +.doc-contents td code { + word-break: normal !important; +} + +/* No line break before first paragraph of descriptions. */ +.doc-md-description, +.doc-md-description>p:first-child { + display: inline; +} + +/* Max width for docstring sections tables. */ +.doc .md-typeset__table, +.doc .md-typeset__table table { + display: table !important; + width: 100%; +} + +.doc .md-typeset__table tr { + display: table-row; +} + +/* Defaults in Spacy table style. */ +.doc-param-default { + float: right; +} + +/* Backward-compatibility: docstring section titles in bold. */ +.doc-section-title { + font-weight: bold; +} + +/* Symbols in Navigation and ToC. */ +:root, +[data-md-color-scheme="default"] { + --doc-symbol-attribute-fg-color: #953800; + --doc-symbol-function-fg-color: #8250df; + --doc-symbol-method-fg-color: #8250df; + --doc-symbol-class-fg-color: #0550ae; + --doc-symbol-module-fg-color: #5cad0f; + + --doc-symbol-attribute-bg-color: #9538001a; + --doc-symbol-function-bg-color: #8250df1a; + --doc-symbol-method-bg-color: #8250df1a; + --doc-symbol-class-bg-color: #0550ae1a; + --doc-symbol-module-bg-color: #5cad0f1a; +} + +[data-md-color-scheme="slate"] { + --doc-symbol-attribute-fg-color: #ffa657; + --doc-symbol-function-fg-color: #d2a8ff; + --doc-symbol-method-fg-color: #d2a8ff; + --doc-symbol-class-fg-color: #79c0ff; + --doc-symbol-module-fg-color: #baff79; + + --doc-symbol-attribute-bg-color: #ffa6571a; + --doc-symbol-function-bg-color: #d2a8ff1a; + --doc-symbol-method-bg-color: #d2a8ff1a; + --doc-symbol-class-bg-color: #79c0ff1a; + --doc-symbol-module-bg-color: #baff791a; +} + +code.doc-symbol { + border-radius: .1rem; + font-size: .85em; + padding: 0 .3em; + 
font-weight: bold; +} + +code.doc-symbol-attribute { + color: var(--doc-symbol-attribute-fg-color); + background-color: var(--doc-symbol-attribute-bg-color); +} + +code.doc-symbol-attribute::after { + content: "attr"; +} + +code.doc-symbol-function { + color: var(--doc-symbol-function-fg-color); + background-color: var(--doc-symbol-function-bg-color); +} + +code.doc-symbol-function::after { + content: "func"; +} + +code.doc-symbol-method { + color: var(--doc-symbol-method-fg-color); + background-color: var(--doc-symbol-method-bg-color); +} + +code.doc-symbol-method::after { + content: "meth"; +} + +code.doc-symbol-class { + color: var(--doc-symbol-class-fg-color); + background-color: var(--doc-symbol-class-bg-color); +} + +code.doc-symbol-class::after { + content: "class"; +} + +code.doc-symbol-module { + color: var(--doc-symbol-module-fg-color); + background-color: var(--doc-symbol-module-bg-color); +} + +code.doc-symbol-module::after { + content: "mod"; +} + +.doc-signature .autorefs { + color: inherit; + border-bottom: 1px dotted currentcolor; +} diff --git a/docs/2.0.6/assets/images/favicon.png b/docs/2.0.6/assets/images/favicon.png new file mode 100644 index 0000000000000000000000000000000000000000..1cf13b9f9d978896599290a74f77d5dbe7d1655c GIT binary patch literal 1870 zcmV-U2eJ5xP)Gc)JR9QMau)O=X#!i9;T z37kk-upj^(fsR36MHs_+1RCI)NNu9}lD0S{B^g8PN?Ww(5|~L#Ng*g{WsqleV}|#l zz8@ri&cTzw_h33bHI+12+kK6WN$h#n5cD8OQt`5kw6p~9H3()bUQ8OS4Q4HTQ=1Ol z_JAocz`fLbT2^{`8n~UAo=#AUOf=SOq4pYkt;XbC&f#7lb$*7=$na!mWCQ`dBQsO0 zLFBSPj*N?#u5&pf2t4XjEGH|=pPQ8xh7tpx;US5Cx_Ju;!O`ya-yF`)b%TEt5>eP1ZX~}sjjA%FJF?h7cX8=b!DZl<6%Cv z*G0uvvU+vmnpLZ2paivG-(cd*y3$hCIcsZcYOGh{$&)A6*XX&kXZd3G8m)G$Zz-LV z^GF3VAW^Mdv!)4OM8EgqRiz~*Cji;uzl2uC9^=8I84vNp;ltJ|q-*uQwGp2ma6cY7 z;`%`!9UXO@fr&Ebapfs34OmS9^u6$)bJxrucutf>`dKPKT%%*d3XlFVKunp9 zasduxjrjs>f8V=D|J=XNZp;_Zy^WgQ$9WDjgY=z@stwiEBm9u5*|34&1Na8BMjjgf3+SHcr`5~>oz1Y?SW^=K z^bTyO6>Gar#P_W2gEMwq)ot3; 
zREHn~U&Dp0l6YT0&k-wLwYjb?5zGK`W6S2v+K>AM(95m2C20L|3m~rN8dprPr@t)5lsk9Hu*W z?pS990s;Ez=+Rj{x7p``4>+c0G5^pYnB1^!TL=(?HLHZ+HicG{~4F1d^5Awl_2!1jICM-!9eoLhbbT^;yHcefyTAaqRcY zmuctDopPT!%k+}x%lZRKnzykr2}}XfG_ne?nRQO~?%hkzo;@RN{P6o`&mMUWBYMTe z6i8ChtjX&gXl`nvrU>jah)2iNM%JdjqoaeaU%yVn!^70x-flljp6Q5tK}5}&X8&&G zX3fpb3E(!rH=zVI_9Gjl45w@{(ITqngWFe7@9{mX;tO25Z_8 zQHEpI+FkTU#4xu>RkN>b3Tnc3UpWzPXWm#o55GKF09j^Mh~)K7{QqbO_~(@CVq! zS<8954|P8mXN2MRs86xZ&Q4EfM@JB94b=(YGuk)s&^jiSF=t3*oNK3`rD{H`yQ?d; ztE=laAUoZx5?RC8*WKOj`%LXEkgDd>&^Q4M^z`%u0rg-It=hLCVsq!Z%^6eB-OvOT zFZ28TN&cRmgU}Elrnk43)!>Z1FCPL2K$7}gwzIc48NX}#!A1BpJP?#v5wkNprhV** z?Cpalt1oH&{r!o3eSKc&ap)iz2BTn_VV`4>9M^b3;(YY}4>#ML6{~(4mH+?%07*qo IM6N<$f(jP3KmY&$ literal 0 HcmV?d00001 diff --git a/docs/2.0.6/assets/javascripts/bundle.525ec568.min.js b/docs/2.0.6/assets/javascripts/bundle.525ec568.min.js new file mode 100644 index 0000000..4b08eae --- /dev/null +++ b/docs/2.0.6/assets/javascripts/bundle.525ec568.min.js @@ -0,0 +1,16 @@ +"use strict";(()=>{var Wi=Object.create;var gr=Object.defineProperty;var Di=Object.getOwnPropertyDescriptor;var Vi=Object.getOwnPropertyNames,Vt=Object.getOwnPropertySymbols,Ni=Object.getPrototypeOf,yr=Object.prototype.hasOwnProperty,ao=Object.prototype.propertyIsEnumerable;var io=(e,t,r)=>t in e?gr(e,t,{enumerable:!0,configurable:!0,writable:!0,value:r}):e[t]=r,$=(e,t)=>{for(var r in t||(t={}))yr.call(t,r)&&io(e,r,t[r]);if(Vt)for(var r of Vt(t))ao.call(t,r)&&io(e,r,t[r]);return e};var so=(e,t)=>{var r={};for(var o in e)yr.call(e,o)&&t.indexOf(o)<0&&(r[o]=e[o]);if(e!=null&&Vt)for(var o of Vt(e))t.indexOf(o)<0&&ao.call(e,o)&&(r[o]=e[o]);return r};var xr=(e,t)=>()=>(t||e((t={exports:{}}).exports,t),t.exports);var zi=(e,t,r,o)=>{if(t&&typeof t=="object"||typeof t=="function")for(let n of Vi(t))!yr.call(e,n)&&n!==r&&gr(e,n,{get:()=>t[n],enumerable:!(o=Di(t,n))||o.enumerable});return e};var Mt=(e,t,r)=>(r=e!=null?Wi(Ni(e)):{},zi(t||!e||!e.__esModule?gr(r,"default",{value:e,enumerable:!0}):r,e));var 
co=(e,t,r)=>new Promise((o,n)=>{var i=p=>{try{s(r.next(p))}catch(c){n(c)}},a=p=>{try{s(r.throw(p))}catch(c){n(c)}},s=p=>p.done?o(p.value):Promise.resolve(p.value).then(i,a);s((r=r.apply(e,t)).next())});var lo=xr((Er,po)=>{(function(e,t){typeof Er=="object"&&typeof po!="undefined"?t():typeof define=="function"&&define.amd?define(t):t()})(Er,function(){"use strict";function e(r){var o=!0,n=!1,i=null,a={text:!0,search:!0,url:!0,tel:!0,email:!0,password:!0,number:!0,date:!0,month:!0,week:!0,time:!0,datetime:!0,"datetime-local":!0};function s(k){return!!(k&&k!==document&&k.nodeName!=="HTML"&&k.nodeName!=="BODY"&&"classList"in k&&"contains"in k.classList)}function p(k){var ft=k.type,qe=k.tagName;return!!(qe==="INPUT"&&a[ft]&&!k.readOnly||qe==="TEXTAREA"&&!k.readOnly||k.isContentEditable)}function c(k){k.classList.contains("focus-visible")||(k.classList.add("focus-visible"),k.setAttribute("data-focus-visible-added",""))}function l(k){k.hasAttribute("data-focus-visible-added")&&(k.classList.remove("focus-visible"),k.removeAttribute("data-focus-visible-added"))}function f(k){k.metaKey||k.altKey||k.ctrlKey||(s(r.activeElement)&&c(r.activeElement),o=!0)}function u(k){o=!1}function d(k){s(k.target)&&(o||p(k.target))&&c(k.target)}function y(k){s(k.target)&&(k.target.classList.contains("focus-visible")||k.target.hasAttribute("data-focus-visible-added"))&&(n=!0,window.clearTimeout(i),i=window.setTimeout(function(){n=!1},100),l(k.target))}function L(k){document.visibilityState==="hidden"&&(n&&(o=!0),X())}function X(){document.addEventListener("mousemove",J),document.addEventListener("mousedown",J),document.addEventListener("mouseup",J),document.addEventListener("pointermove",J),document.addEventListener("pointerdown",J),document.addEventListener("pointerup",J),document.addEventListener("touchmove",J),document.addEventListener("touchstart",J),document.addEventListener("touchend",J)}function 
te(){document.removeEventListener("mousemove",J),document.removeEventListener("mousedown",J),document.removeEventListener("mouseup",J),document.removeEventListener("pointermove",J),document.removeEventListener("pointerdown",J),document.removeEventListener("pointerup",J),document.removeEventListener("touchmove",J),document.removeEventListener("touchstart",J),document.removeEventListener("touchend",J)}function J(k){k.target.nodeName&&k.target.nodeName.toLowerCase()==="html"||(o=!1,te())}document.addEventListener("keydown",f,!0),document.addEventListener("mousedown",u,!0),document.addEventListener("pointerdown",u,!0),document.addEventListener("touchstart",u,!0),document.addEventListener("visibilitychange",L,!0),X(),r.addEventListener("focus",d,!0),r.addEventListener("blur",y,!0),r.nodeType===Node.DOCUMENT_FRAGMENT_NODE&&r.host?r.host.setAttribute("data-js-focus-visible",""):r.nodeType===Node.DOCUMENT_NODE&&(document.documentElement.classList.add("js-focus-visible"),document.documentElement.setAttribute("data-js-focus-visible",""))}if(typeof window!="undefined"&&typeof document!="undefined"){window.applyFocusVisiblePolyfill=e;var t;try{t=new CustomEvent("focus-visible-polyfill-ready")}catch(r){t=document.createEvent("CustomEvent"),t.initCustomEvent("focus-visible-polyfill-ready",!1,!1,{})}window.dispatchEvent(t)}typeof document!="undefined"&&e(document)})});var qr=xr((hy,On)=>{"use strict";/*! + * escape-html + * Copyright(c) 2012-2013 TJ Holowaychuk + * Copyright(c) 2015 Andreas Lubbe + * Copyright(c) 2015 Tiancheng "Timothy" Gu + * MIT Licensed + */var $a=/["'&<>]/;On.exports=Pa;function Pa(e){var t=""+e,r=$a.exec(t);if(!r)return t;var o,n="",i=0,a=0;for(i=r.index;i{/*! 
+ * clipboard.js v2.0.11 + * https://clipboardjs.com/ + * + * Licensed MIT © Zeno Rocha + */(function(t,r){typeof It=="object"&&typeof Yr=="object"?Yr.exports=r():typeof define=="function"&&define.amd?define([],r):typeof It=="object"?It.ClipboardJS=r():t.ClipboardJS=r()})(It,function(){return function(){var e={686:function(o,n,i){"use strict";i.d(n,{default:function(){return Ui}});var a=i(279),s=i.n(a),p=i(370),c=i.n(p),l=i(817),f=i.n(l);function u(V){try{return document.execCommand(V)}catch(A){return!1}}var d=function(A){var M=f()(A);return u("cut"),M},y=d;function L(V){var A=document.documentElement.getAttribute("dir")==="rtl",M=document.createElement("textarea");M.style.fontSize="12pt",M.style.border="0",M.style.padding="0",M.style.margin="0",M.style.position="absolute",M.style[A?"right":"left"]="-9999px";var F=window.pageYOffset||document.documentElement.scrollTop;return M.style.top="".concat(F,"px"),M.setAttribute("readonly",""),M.value=V,M}var X=function(A,M){var F=L(A);M.container.appendChild(F);var D=f()(F);return u("copy"),F.remove(),D},te=function(A){var M=arguments.length>1&&arguments[1]!==void 0?arguments[1]:{container:document.body},F="";return typeof A=="string"?F=X(A,M):A instanceof HTMLInputElement&&!["text","search","url","tel","password"].includes(A==null?void 0:A.type)?F=X(A.value,M):(F=f()(A),u("copy")),F},J=te;function k(V){"@babel/helpers - typeof";return typeof Symbol=="function"&&typeof Symbol.iterator=="symbol"?k=function(M){return typeof M}:k=function(M){return M&&typeof Symbol=="function"&&M.constructor===Symbol&&M!==Symbol.prototype?"symbol":typeof M},k(V)}var ft=function(){var A=arguments.length>0&&arguments[0]!==void 0?arguments[0]:{},M=A.action,F=M===void 0?"copy":M,D=A.container,Y=A.target,$e=A.text;if(F!=="copy"&&F!=="cut")throw new Error('Invalid "action" value, use either "copy" or "cut"');if(Y!==void 0)if(Y&&k(Y)==="object"&&Y.nodeType===1){if(F==="copy"&&Y.hasAttribute("disabled"))throw new Error('Invalid "target" attribute. 
Please use "readonly" instead of "disabled" attribute');if(F==="cut"&&(Y.hasAttribute("readonly")||Y.hasAttribute("disabled")))throw new Error(`Invalid "target" attribute. You can't cut text from elements with "readonly" or "disabled" attributes`)}else throw new Error('Invalid "target" value, use a valid Element');if($e)return J($e,{container:D});if(Y)return F==="cut"?y(Y):J(Y,{container:D})},qe=ft;function Fe(V){"@babel/helpers - typeof";return typeof Symbol=="function"&&typeof Symbol.iterator=="symbol"?Fe=function(M){return typeof M}:Fe=function(M){return M&&typeof Symbol=="function"&&M.constructor===Symbol&&M!==Symbol.prototype?"symbol":typeof M},Fe(V)}function ki(V,A){if(!(V instanceof A))throw new TypeError("Cannot call a class as a function")}function no(V,A){for(var M=0;M0&&arguments[0]!==void 0?arguments[0]:{};this.action=typeof D.action=="function"?D.action:this.defaultAction,this.target=typeof D.target=="function"?D.target:this.defaultTarget,this.text=typeof D.text=="function"?D.text:this.defaultText,this.container=Fe(D.container)==="object"?D.container:document.body}},{key:"listenClick",value:function(D){var Y=this;this.listener=c()(D,"click",function($e){return Y.onClick($e)})}},{key:"onClick",value:function(D){var Y=D.delegateTarget||D.currentTarget,$e=this.action(Y)||"copy",Dt=qe({action:$e,container:this.container,target:this.target(Y),text:this.text(Y)});this.emit(Dt?"success":"error",{action:$e,text:Dt,trigger:Y,clearSelection:function(){Y&&Y.focus(),window.getSelection().removeAllRanges()}})}},{key:"defaultAction",value:function(D){return vr("action",D)}},{key:"defaultTarget",value:function(D){var Y=vr("target",D);if(Y)return document.querySelector(Y)}},{key:"defaultText",value:function(D){return vr("text",D)}},{key:"destroy",value:function(){this.listener.destroy()}}],[{key:"copy",value:function(D){var Y=arguments.length>1&&arguments[1]!==void 0?arguments[1]:{container:document.body};return J(D,Y)}},{key:"cut",value:function(D){return 
y(D)}},{key:"isSupported",value:function(){var D=arguments.length>0&&arguments[0]!==void 0?arguments[0]:["copy","cut"],Y=typeof D=="string"?[D]:D,$e=!!document.queryCommandSupported;return Y.forEach(function(Dt){$e=$e&&!!document.queryCommandSupported(Dt)}),$e}}]),M}(s()),Ui=Fi},828:function(o){var n=9;if(typeof Element!="undefined"&&!Element.prototype.matches){var i=Element.prototype;i.matches=i.matchesSelector||i.mozMatchesSelector||i.msMatchesSelector||i.oMatchesSelector||i.webkitMatchesSelector}function a(s,p){for(;s&&s.nodeType!==n;){if(typeof s.matches=="function"&&s.matches(p))return s;s=s.parentNode}}o.exports=a},438:function(o,n,i){var a=i(828);function s(l,f,u,d,y){var L=c.apply(this,arguments);return l.addEventListener(u,L,y),{destroy:function(){l.removeEventListener(u,L,y)}}}function p(l,f,u,d,y){return typeof l.addEventListener=="function"?s.apply(null,arguments):typeof u=="function"?s.bind(null,document).apply(null,arguments):(typeof l=="string"&&(l=document.querySelectorAll(l)),Array.prototype.map.call(l,function(L){return s(L,f,u,d,y)}))}function c(l,f,u,d){return function(y){y.delegateTarget=a(y.target,f),y.delegateTarget&&d.call(l,y)}}o.exports=p},879:function(o,n){n.node=function(i){return i!==void 0&&i instanceof HTMLElement&&i.nodeType===1},n.nodeList=function(i){var a=Object.prototype.toString.call(i);return i!==void 0&&(a==="[object NodeList]"||a==="[object HTMLCollection]")&&"length"in i&&(i.length===0||n.node(i[0]))},n.string=function(i){return typeof i=="string"||i instanceof String},n.fn=function(i){var a=Object.prototype.toString.call(i);return a==="[object Function]"}},370:function(o,n,i){var a=i(879),s=i(438);function p(u,d,y){if(!u&&!d&&!y)throw new Error("Missing required arguments");if(!a.string(d))throw new TypeError("Second argument must be a String");if(!a.fn(y))throw new TypeError("Third argument must be a Function");if(a.node(u))return c(u,d,y);if(a.nodeList(u))return l(u,d,y);if(a.string(u))return f(u,d,y);throw new 
TypeError("First argument must be a String, HTMLElement, HTMLCollection, or NodeList")}function c(u,d,y){return u.addEventListener(d,y),{destroy:function(){u.removeEventListener(d,y)}}}function l(u,d,y){return Array.prototype.forEach.call(u,function(L){L.addEventListener(d,y)}),{destroy:function(){Array.prototype.forEach.call(u,function(L){L.removeEventListener(d,y)})}}}function f(u,d,y){return s(document.body,u,d,y)}o.exports=p},817:function(o){function n(i){var a;if(i.nodeName==="SELECT")i.focus(),a=i.value;else if(i.nodeName==="INPUT"||i.nodeName==="TEXTAREA"){var s=i.hasAttribute("readonly");s||i.setAttribute("readonly",""),i.select(),i.setSelectionRange(0,i.value.length),s||i.removeAttribute("readonly"),a=i.value}else{i.hasAttribute("contenteditable")&&i.focus();var p=window.getSelection(),c=document.createRange();c.selectNodeContents(i),p.removeAllRanges(),p.addRange(c),a=p.toString()}return a}o.exports=n},279:function(o){function n(){}n.prototype={on:function(i,a,s){var p=this.e||(this.e={});return(p[i]||(p[i]=[])).push({fn:a,ctx:s}),this},once:function(i,a,s){var p=this;function c(){p.off(i,c),a.apply(s,arguments)}return c._=a,this.on(i,c,s)},emit:function(i){var a=[].slice.call(arguments,1),s=((this.e||(this.e={}))[i]||[]).slice(),p=0,c=s.length;for(p;p0&&i[i.length-1])&&(c[0]===6||c[0]===2)){r=0;continue}if(c[0]===3&&(!i||c[1]>i[0]&&c[1]=e.length&&(e=void 0),{value:e&&e[o++],done:!e}}};throw new TypeError(t?"Object is not iterable.":"Symbol.iterator is not defined.")}function N(e,t){var r=typeof Symbol=="function"&&e[Symbol.iterator];if(!r)return e;var o=r.call(e),n,i=[],a;try{for(;(t===void 0||t-- >0)&&!(n=o.next()).done;)i.push(n.value)}catch(s){a={error:s}}finally{try{n&&!n.done&&(r=o.return)&&r.call(o)}finally{if(a)throw a.error}}return i}function q(e,t,r){if(r||arguments.length===2)for(var o=0,n=t.length,i;o1||p(d,L)})},y&&(n[d]=y(n[d])))}function p(d,y){try{c(o[d](y))}catch(L){u(i[0][3],L)}}function c(d){d.value instanceof 
nt?Promise.resolve(d.value.v).then(l,f):u(i[0][2],d)}function l(d){p("next",d)}function f(d){p("throw",d)}function u(d,y){d(y),i.shift(),i.length&&p(i[0][0],i[0][1])}}function uo(e){if(!Symbol.asyncIterator)throw new TypeError("Symbol.asyncIterator is not defined.");var t=e[Symbol.asyncIterator],r;return t?t.call(e):(e=typeof he=="function"?he(e):e[Symbol.iterator](),r={},o("next"),o("throw"),o("return"),r[Symbol.asyncIterator]=function(){return this},r);function o(i){r[i]=e[i]&&function(a){return new Promise(function(s,p){a=e[i](a),n(s,p,a.done,a.value)})}}function n(i,a,s,p){Promise.resolve(p).then(function(c){i({value:c,done:s})},a)}}function H(e){return typeof e=="function"}function ut(e){var t=function(o){Error.call(o),o.stack=new Error().stack},r=e(t);return r.prototype=Object.create(Error.prototype),r.prototype.constructor=r,r}var zt=ut(function(e){return function(r){e(this),this.message=r?r.length+` errors occurred during unsubscription: +`+r.map(function(o,n){return n+1+") "+o.toString()}).join(` + `):"",this.name="UnsubscriptionError",this.errors=r}});function Qe(e,t){if(e){var r=e.indexOf(t);0<=r&&e.splice(r,1)}}var Ue=function(){function e(t){this.initialTeardown=t,this.closed=!1,this._parentage=null,this._finalizers=null}return e.prototype.unsubscribe=function(){var t,r,o,n,i;if(!this.closed){this.closed=!0;var a=this._parentage;if(a)if(this._parentage=null,Array.isArray(a))try{for(var s=he(a),p=s.next();!p.done;p=s.next()){var c=p.value;c.remove(this)}}catch(L){t={error:L}}finally{try{p&&!p.done&&(r=s.return)&&r.call(s)}finally{if(t)throw t.error}}else a.remove(this);var l=this.initialTeardown;if(H(l))try{l()}catch(L){i=L instanceof zt?L.errors:[L]}var f=this._finalizers;if(f){this._finalizers=null;try{for(var u=he(f),d=u.next();!d.done;d=u.next()){var y=d.value;try{ho(y)}catch(L){i=i!=null?i:[],L instanceof zt?i=q(q([],N(i)),N(L.errors)):i.push(L)}}}catch(L){o={error:L}}finally{try{d&&!d.done&&(n=u.return)&&n.call(u)}finally{if(o)throw 
o.error}}}if(i)throw new zt(i)}},e.prototype.add=function(t){var r;if(t&&t!==this)if(this.closed)ho(t);else{if(t instanceof e){if(t.closed||t._hasParent(this))return;t._addParent(this)}(this._finalizers=(r=this._finalizers)!==null&&r!==void 0?r:[]).push(t)}},e.prototype._hasParent=function(t){var r=this._parentage;return r===t||Array.isArray(r)&&r.includes(t)},e.prototype._addParent=function(t){var r=this._parentage;this._parentage=Array.isArray(r)?(r.push(t),r):r?[r,t]:t},e.prototype._removeParent=function(t){var r=this._parentage;r===t?this._parentage=null:Array.isArray(r)&&Qe(r,t)},e.prototype.remove=function(t){var r=this._finalizers;r&&Qe(r,t),t instanceof e&&t._removeParent(this)},e.EMPTY=function(){var t=new e;return t.closed=!0,t}(),e}();var Tr=Ue.EMPTY;function qt(e){return e instanceof Ue||e&&"closed"in e&&H(e.remove)&&H(e.add)&&H(e.unsubscribe)}function ho(e){H(e)?e():e.unsubscribe()}var Pe={onUnhandledError:null,onStoppedNotification:null,Promise:void 0,useDeprecatedSynchronousErrorHandling:!1,useDeprecatedNextContext:!1};var dt={setTimeout:function(e,t){for(var r=[],o=2;o0},enumerable:!1,configurable:!0}),t.prototype._trySubscribe=function(r){return this._throwIfClosed(),e.prototype._trySubscribe.call(this,r)},t.prototype._subscribe=function(r){return this._throwIfClosed(),this._checkFinalizedStatuses(r),this._innerSubscribe(r)},t.prototype._innerSubscribe=function(r){var o=this,n=this,i=n.hasError,a=n.isStopped,s=n.observers;return i||a?Tr:(this.currentObservers=null,s.push(r),new Ue(function(){o.currentObservers=null,Qe(s,r)}))},t.prototype._checkFinalizedStatuses=function(r){var o=this,n=o.hasError,i=o.thrownError,a=o.isStopped;n?r.error(i):a&&r.complete()},t.prototype.asObservable=function(){var r=new j;return r.source=this,r},t.create=function(r,o){return new To(r,o)},t}(j);var To=function(e){oe(t,e);function t(r,o){var n=e.call(this)||this;return n.destination=r,n.source=o,n}return t.prototype.next=function(r){var 
o,n;(n=(o=this.destination)===null||o===void 0?void 0:o.next)===null||n===void 0||n.call(o,r)},t.prototype.error=function(r){var o,n;(n=(o=this.destination)===null||o===void 0?void 0:o.error)===null||n===void 0||n.call(o,r)},t.prototype.complete=function(){var r,o;(o=(r=this.destination)===null||r===void 0?void 0:r.complete)===null||o===void 0||o.call(r)},t.prototype._subscribe=function(r){var o,n;return(n=(o=this.source)===null||o===void 0?void 0:o.subscribe(r))!==null&&n!==void 0?n:Tr},t}(g);var _r=function(e){oe(t,e);function t(r){var o=e.call(this)||this;return o._value=r,o}return Object.defineProperty(t.prototype,"value",{get:function(){return this.getValue()},enumerable:!1,configurable:!0}),t.prototype._subscribe=function(r){var o=e.prototype._subscribe.call(this,r);return!o.closed&&r.next(this._value),o},t.prototype.getValue=function(){var r=this,o=r.hasError,n=r.thrownError,i=r._value;if(o)throw n;return this._throwIfClosed(),i},t.prototype.next=function(r){e.prototype.next.call(this,this._value=r)},t}(g);var At={now:function(){return(At.delegate||Date).now()},delegate:void 0};var Ct=function(e){oe(t,e);function t(r,o,n){r===void 0&&(r=1/0),o===void 0&&(o=1/0),n===void 0&&(n=At);var i=e.call(this)||this;return i._bufferSize=r,i._windowTime=o,i._timestampProvider=n,i._buffer=[],i._infiniteTimeWindow=!0,i._infiniteTimeWindow=o===1/0,i._bufferSize=Math.max(1,r),i._windowTime=Math.max(1,o),i}return t.prototype.next=function(r){var o=this,n=o.isStopped,i=o._buffer,a=o._infiniteTimeWindow,s=o._timestampProvider,p=o._windowTime;n||(i.push(r),!a&&i.push(s.now()+p)),this._trimBuffer(),e.prototype.next.call(this,r)},t.prototype._subscribe=function(r){this._throwIfClosed(),this._trimBuffer();for(var o=this._innerSubscribe(r),n=this,i=n._infiniteTimeWindow,a=n._buffer,s=a.slice(),p=0;p0?e.prototype.schedule.call(this,r,o):(this.delay=o,this.state=r,this.scheduler.flush(this),this)},t.prototype.execute=function(r,o){return 
o>0||this.closed?e.prototype.execute.call(this,r,o):this._execute(r,o)},t.prototype.requestAsyncId=function(r,o,n){return n===void 0&&(n=0),n!=null&&n>0||n==null&&this.delay>0?e.prototype.requestAsyncId.call(this,r,o,n):(r.flush(this),0)},t}(gt);var Lo=function(e){oe(t,e);function t(){return e!==null&&e.apply(this,arguments)||this}return t}(yt);var kr=new Lo(Oo);var Mo=function(e){oe(t,e);function t(r,o){var n=e.call(this,r,o)||this;return n.scheduler=r,n.work=o,n}return t.prototype.requestAsyncId=function(r,o,n){return n===void 0&&(n=0),n!==null&&n>0?e.prototype.requestAsyncId.call(this,r,o,n):(r.actions.push(this),r._scheduled||(r._scheduled=vt.requestAnimationFrame(function(){return r.flush(void 0)})))},t.prototype.recycleAsyncId=function(r,o,n){var i;if(n===void 0&&(n=0),n!=null?n>0:this.delay>0)return e.prototype.recycleAsyncId.call(this,r,o,n);var a=r.actions;o!=null&&((i=a[a.length-1])===null||i===void 0?void 0:i.id)!==o&&(vt.cancelAnimationFrame(o),r._scheduled=void 0)},t}(gt);var _o=function(e){oe(t,e);function t(){return e!==null&&e.apply(this,arguments)||this}return t.prototype.flush=function(r){this._active=!0;var o=this._scheduled;this._scheduled=void 0;var n=this.actions,i;r=r||n.shift();do if(i=r.execute(r.state,r.delay))break;while((r=n[0])&&r.id===o&&n.shift());if(this._active=!1,i){for(;(r=n[0])&&r.id===o&&n.shift();)r.unsubscribe();throw i}},t}(yt);var me=new _o(Mo);var S=new j(function(e){return e.complete()});function Yt(e){return e&&H(e.schedule)}function Hr(e){return e[e.length-1]}function Xe(e){return H(Hr(e))?e.pop():void 0}function ke(e){return Yt(Hr(e))?e.pop():void 0}function Bt(e,t){return typeof Hr(e)=="number"?e.pop():t}var xt=function(e){return e&&typeof e.length=="number"&&typeof e!="function"};function Gt(e){return H(e==null?void 0:e.then)}function Jt(e){return H(e[bt])}function Xt(e){return Symbol.asyncIterator&&H(e==null?void 0:e[Symbol.asyncIterator])}function Zt(e){return new TypeError("You provided "+(e!==null&&typeof 
e=="object"?"an invalid object":"'"+e+"'")+" where a stream was expected. You can provide an Observable, Promise, ReadableStream, Array, AsyncIterable, or Iterable.")}function Zi(){return typeof Symbol!="function"||!Symbol.iterator?"@@iterator":Symbol.iterator}var er=Zi();function tr(e){return H(e==null?void 0:e[er])}function rr(e){return fo(this,arguments,function(){var r,o,n,i;return Nt(this,function(a){switch(a.label){case 0:r=e.getReader(),a.label=1;case 1:a.trys.push([1,,9,10]),a.label=2;case 2:return[4,nt(r.read())];case 3:return o=a.sent(),n=o.value,i=o.done,i?[4,nt(void 0)]:[3,5];case 4:return[2,a.sent()];case 5:return[4,nt(n)];case 6:return[4,a.sent()];case 7:return a.sent(),[3,2];case 8:return[3,10];case 9:return r.releaseLock(),[7];case 10:return[2]}})})}function or(e){return H(e==null?void 0:e.getReader)}function U(e){if(e instanceof j)return e;if(e!=null){if(Jt(e))return ea(e);if(xt(e))return ta(e);if(Gt(e))return ra(e);if(Xt(e))return Ao(e);if(tr(e))return oa(e);if(or(e))return na(e)}throw Zt(e)}function ea(e){return new j(function(t){var r=e[bt]();if(H(r.subscribe))return r.subscribe(t);throw new TypeError("Provided object does not correctly implement Symbol.observable")})}function ta(e){return new j(function(t){for(var r=0;r=2;return function(o){return o.pipe(e?b(function(n,i){return e(n,i,o)}):le,Te(1),r?De(t):Qo(function(){return new ir}))}}function jr(e){return e<=0?function(){return S}:E(function(t,r){var o=[];t.subscribe(T(r,function(n){o.push(n),e=2,!0))}function pe(e){e===void 0&&(e={});var t=e.connector,r=t===void 0?function(){return new g}:t,o=e.resetOnError,n=o===void 0?!0:o,i=e.resetOnComplete,a=i===void 0?!0:i,s=e.resetOnRefCountZero,p=s===void 0?!0:s;return function(c){var l,f,u,d=0,y=!1,L=!1,X=function(){f==null||f.unsubscribe(),f=void 0},te=function(){X(),l=u=void 0,y=L=!1},J=function(){var k=l;te(),k==null||k.unsubscribe()};return E(function(k,ft){d++,!L&&!y&&X();var 
qe=u=u!=null?u:r();ft.add(function(){d--,d===0&&!L&&!y&&(f=Ur(J,p))}),qe.subscribe(ft),!l&&d>0&&(l=new at({next:function(Fe){return qe.next(Fe)},error:function(Fe){L=!0,X(),f=Ur(te,n,Fe),qe.error(Fe)},complete:function(){y=!0,X(),f=Ur(te,a),qe.complete()}}),U(k).subscribe(l))})(c)}}function Ur(e,t){for(var r=[],o=2;oe.next(document)),e}function P(e,t=document){return Array.from(t.querySelectorAll(e))}function R(e,t=document){let r=fe(e,t);if(typeof r=="undefined")throw new ReferenceError(`Missing element: expected "${e}" to be present`);return r}function fe(e,t=document){return t.querySelector(e)||void 0}function Ie(){var e,t,r,o;return(o=(r=(t=(e=document.activeElement)==null?void 0:e.shadowRoot)==null?void 0:t.activeElement)!=null?r:document.activeElement)!=null?o:void 0}var wa=O(h(document.body,"focusin"),h(document.body,"focusout")).pipe(_e(1),Q(void 0),m(()=>Ie()||document.body),G(1));function et(e){return wa.pipe(m(t=>e.contains(t)),K())}function $t(e,t){return C(()=>O(h(e,"mouseenter").pipe(m(()=>!0)),h(e,"mouseleave").pipe(m(()=>!1))).pipe(t?Ht(r=>Le(+!r*t)):le,Q(e.matches(":hover"))))}function Jo(e,t){if(typeof t=="string"||typeof t=="number")e.innerHTML+=t.toString();else if(t instanceof Node)e.appendChild(t);else if(Array.isArray(t))for(let r of t)Jo(e,r)}function x(e,t,...r){let o=document.createElement(e);if(t)for(let n of Object.keys(t))typeof t[n]!="undefined"&&(typeof t[n]!="boolean"?o.setAttribute(n,t[n]):o.setAttribute(n,""));for(let n of r)Jo(o,n);return o}function sr(e){if(e>999){let t=+((e-950)%1e3>99);return`${((e+1e-6)/1e3).toFixed(t)}k`}else return e.toString()}function Tt(e){let t=x("script",{src:e});return C(()=>(document.head.appendChild(t),O(h(t,"load"),h(t,"error").pipe(v(()=>$r(()=>new ReferenceError(`Invalid script: ${e}`))))).pipe(m(()=>{}),_(()=>document.head.removeChild(t)),Te(1))))}var Xo=new g,Ta=C(()=>typeof ResizeObserver=="undefined"?Tt("https://unpkg.com/resize-observer-polyfill"):I(void 0)).pipe(m(()=>new 
ResizeObserver(e=>e.forEach(t=>Xo.next(t)))),v(e=>O(Ye,I(e)).pipe(_(()=>e.disconnect()))),G(1));function ce(e){return{width:e.offsetWidth,height:e.offsetHeight}}function ge(e){let t=e;for(;t.clientWidth===0&&t.parentElement;)t=t.parentElement;return Ta.pipe(w(r=>r.observe(t)),v(r=>Xo.pipe(b(o=>o.target===t),_(()=>r.unobserve(t)))),m(()=>ce(e)),Q(ce(e)))}function St(e){return{width:e.scrollWidth,height:e.scrollHeight}}function cr(e){let t=e.parentElement;for(;t&&(e.scrollWidth<=t.scrollWidth&&e.scrollHeight<=t.scrollHeight);)t=(e=t).parentElement;return t?e:void 0}function Zo(e){let t=[],r=e.parentElement;for(;r;)(e.clientWidth>r.clientWidth||e.clientHeight>r.clientHeight)&&t.push(r),r=(e=r).parentElement;return t.length===0&&t.push(document.documentElement),t}function Ve(e){return{x:e.offsetLeft,y:e.offsetTop}}function en(e){let t=e.getBoundingClientRect();return{x:t.x+window.scrollX,y:t.y+window.scrollY}}function tn(e){return O(h(window,"load"),h(window,"resize")).pipe(Me(0,me),m(()=>Ve(e)),Q(Ve(e)))}function pr(e){return{x:e.scrollLeft,y:e.scrollTop}}function Ne(e){return O(h(e,"scroll"),h(window,"scroll"),h(window,"resize")).pipe(Me(0,me),m(()=>pr(e)),Q(pr(e)))}var rn=new g,Sa=C(()=>I(new IntersectionObserver(e=>{for(let t of e)rn.next(t)},{threshold:0}))).pipe(v(e=>O(Ye,I(e)).pipe(_(()=>e.disconnect()))),G(1));function tt(e){return Sa.pipe(w(t=>t.observe(e)),v(t=>rn.pipe(b(({target:r})=>r===e),_(()=>t.unobserve(e)),m(({isIntersecting:r})=>r))))}function on(e,t=16){return Ne(e).pipe(m(({y:r})=>{let o=ce(e),n=St(e);return r>=n.height-o.height-t}),K())}var lr={drawer:R("[data-md-toggle=drawer]"),search:R("[data-md-toggle=search]")};function nn(e){return lr[e].checked}function Je(e,t){lr[e].checked!==t&&lr[e].click()}function ze(e){let t=lr[e];return h(t,"change").pipe(m(()=>t.checked),Q(t.checked))}function Oa(e,t){switch(e.constructor){case HTMLInputElement:return e.type==="radio"?/^Arrow/.test(t):!0;case HTMLSelectElement:case 
HTMLTextAreaElement:return!0;default:return e.isContentEditable}}function La(){return O(h(window,"compositionstart").pipe(m(()=>!0)),h(window,"compositionend").pipe(m(()=>!1))).pipe(Q(!1))}function an(){let e=h(window,"keydown").pipe(b(t=>!(t.metaKey||t.ctrlKey)),m(t=>({mode:nn("search")?"search":"global",type:t.key,claim(){t.preventDefault(),t.stopPropagation()}})),b(({mode:t,type:r})=>{if(t==="global"){let o=Ie();if(typeof o!="undefined")return!Oa(o,r)}return!0}),pe());return La().pipe(v(t=>t?S:e))}function ye(){return new URL(location.href)}function lt(e,t=!1){if(B("navigation.instant")&&!t){let r=x("a",{href:e.href});document.body.appendChild(r),r.click(),r.remove()}else location.href=e.href}function sn(){return new g}function cn(){return location.hash.slice(1)}function pn(e){let t=x("a",{href:e});t.addEventListener("click",r=>r.stopPropagation()),t.click()}function Ma(e){return O(h(window,"hashchange"),e).pipe(m(cn),Q(cn()),b(t=>t.length>0),G(1))}function ln(e){return Ma(e).pipe(m(t=>fe(`[id="${t}"]`)),b(t=>typeof t!="undefined"))}function Pt(e){let t=matchMedia(e);return ar(r=>t.addListener(()=>r(t.matches))).pipe(Q(t.matches))}function mn(){let e=matchMedia("print");return O(h(window,"beforeprint").pipe(m(()=>!0)),h(window,"afterprint").pipe(m(()=>!1))).pipe(Q(e.matches))}function Nr(e,t){return e.pipe(v(r=>r?t():S))}function zr(e,t){return new j(r=>{let o=new XMLHttpRequest;return o.open("GET",`${e}`),o.responseType="blob",o.addEventListener("load",()=>{o.status>=200&&o.status<300?(r.next(o.response),r.complete()):r.error(new Error(o.statusText))}),o.addEventListener("error",()=>{r.error(new Error("Network error"))}),o.addEventListener("abort",()=>{r.complete()}),typeof(t==null?void 0:t.progress$)!="undefined"&&(o.addEventListener("progress",n=>{var i;if(n.lengthComputable)t.progress$.next(n.loaded/n.total*100);else{let 
a=(i=o.getResponseHeader("Content-Length"))!=null?i:0;t.progress$.next(n.loaded/+a*100)}}),t.progress$.next(5)),o.send(),()=>o.abort()})}function je(e,t){return zr(e,t).pipe(v(r=>r.text()),m(r=>JSON.parse(r)),G(1))}function fn(e,t){let r=new DOMParser;return zr(e,t).pipe(v(o=>o.text()),m(o=>r.parseFromString(o,"text/html")),G(1))}function un(e,t){let r=new DOMParser;return zr(e,t).pipe(v(o=>o.text()),m(o=>r.parseFromString(o,"text/xml")),G(1))}function dn(){return{x:Math.max(0,scrollX),y:Math.max(0,scrollY)}}function hn(){return O(h(window,"scroll",{passive:!0}),h(window,"resize",{passive:!0})).pipe(m(dn),Q(dn()))}function bn(){return{width:innerWidth,height:innerHeight}}function vn(){return h(window,"resize",{passive:!0}).pipe(m(bn),Q(bn()))}function gn(){return z([hn(),vn()]).pipe(m(([e,t])=>({offset:e,size:t})),G(1))}function mr(e,{viewport$:t,header$:r}){let o=t.pipe(ee("size")),n=z([o,r]).pipe(m(()=>Ve(e)));return z([r,t,n]).pipe(m(([{height:i},{offset:a,size:s},{x:p,y:c}])=>({offset:{x:a.x-p,y:a.y-c+i},size:s})))}function _a(e){return h(e,"message",t=>t.data)}function Aa(e){let t=new g;return t.subscribe(r=>e.postMessage(r)),t}function yn(e,t=new Worker(e)){let r=_a(t),o=Aa(t),n=new g;n.subscribe(o);let i=o.pipe(Z(),ie(!0));return n.pipe(Z(),Re(r.pipe(W(i))),pe())}var Ca=R("#__config"),Ot=JSON.parse(Ca.textContent);Ot.base=`${new URL(Ot.base,ye())}`;function xe(){return Ot}function B(e){return Ot.features.includes(e)}function Ee(e,t){return typeof t!="undefined"?Ot.translations[e].replace("#",t.toString()):Ot.translations[e]}function Se(e,t=document){return R(`[data-md-component=${e}]`,t)}function ae(e,t=document){return P(`[data-md-component=${e}]`,t)}function ka(e){let t=R(".md-typeset > :first-child",e);return h(t,"click",{once:!0}).pipe(m(()=>R(".md-typeset",e)),m(r=>({hash:__md_hash(r.innerHTML)})))}function xn(e){if(!B("announce.dismiss")||!e.childElementCount)return S;if(!e.hidden){let 
t=R(".md-typeset",e);__md_hash(t.innerHTML)===__md_get("__announce")&&(e.hidden=!0)}return C(()=>{let t=new g;return t.subscribe(({hash:r})=>{e.hidden=!0,__md_set("__announce",r)}),ka(e).pipe(w(r=>t.next(r)),_(()=>t.complete()),m(r=>$({ref:e},r)))})}function Ha(e,{target$:t}){return t.pipe(m(r=>({hidden:r!==e})))}function En(e,t){let r=new g;return r.subscribe(({hidden:o})=>{e.hidden=o}),Ha(e,t).pipe(w(o=>r.next(o)),_(()=>r.complete()),m(o=>$({ref:e},o)))}function Rt(e,t){return t==="inline"?x("div",{class:"md-tooltip md-tooltip--inline",id:e,role:"tooltip"},x("div",{class:"md-tooltip__inner md-typeset"})):x("div",{class:"md-tooltip",id:e,role:"tooltip"},x("div",{class:"md-tooltip__inner md-typeset"}))}function wn(...e){return x("div",{class:"md-tooltip2",role:"tooltip"},x("div",{class:"md-tooltip2__inner md-typeset"},e))}function Tn(e,t){if(t=t?`${t}_annotation_${e}`:void 0,t){let r=t?`#${t}`:void 0;return x("aside",{class:"md-annotation",tabIndex:0},Rt(t),x("a",{href:r,class:"md-annotation__index",tabIndex:-1},x("span",{"data-md-annotation-id":e})))}else return x("aside",{class:"md-annotation",tabIndex:0},Rt(t),x("span",{class:"md-annotation__index",tabIndex:-1},x("span",{"data-md-annotation-id":e})))}function Sn(e){return x("button",{class:"md-clipboard md-icon",title:Ee("clipboard.copy"),"data-clipboard-target":`#${e} > code`})}var Ln=Mt(qr());function Qr(e,t){let r=t&2,o=t&1,n=Object.keys(e.terms).filter(p=>!e.terms[p]).reduce((p,c)=>[...p,x("del",null,(0,Ln.default)(c))," "],[]).slice(0,-1),i=xe(),a=new URL(e.location,i.base);B("search.highlight")&&a.searchParams.set("h",Object.entries(e.terms).filter(([,p])=>p).reduce((p,[c])=>`${p} ${c}`.trim(),""));let{tags:s}=xe();return x("a",{href:`${a}`,class:"md-search-result__link",tabIndex:-1},x("article",{class:"md-search-result__article md-typeset","data-md-score":e.score.toFixed(2)},r>0&&x("div",{class:"md-search-result__icon 
md-icon"}),r>0&&x("h1",null,e.title),r<=0&&x("h2",null,e.title),o>0&&e.text.length>0&&e.text,e.tags&&x("nav",{class:"md-tags"},e.tags.map(p=>{let c=s?p in s?`md-tag-icon md-tag--${s[p]}`:"md-tag-icon":"";return x("span",{class:`md-tag ${c}`},p)})),o>0&&n.length>0&&x("p",{class:"md-search-result__terms"},Ee("search.result.term.missing"),": ",...n)))}function Mn(e){let t=e[0].score,r=[...e],o=xe(),n=r.findIndex(l=>!`${new URL(l.location,o.base)}`.includes("#")),[i]=r.splice(n,1),a=r.findIndex(l=>l.scoreQr(l,1)),...p.length?[x("details",{class:"md-search-result__more"},x("summary",{tabIndex:-1},x("div",null,p.length>0&&p.length===1?Ee("search.result.more.one"):Ee("search.result.more.other",p.length))),...p.map(l=>Qr(l,1)))]:[]];return x("li",{class:"md-search-result__item"},c)}function _n(e){return x("ul",{class:"md-source__facts"},Object.entries(e).map(([t,r])=>x("li",{class:`md-source__fact md-source__fact--${t}`},typeof r=="number"?sr(r):r)))}function Kr(e){let t=`tabbed-control tabbed-control--${e}`;return x("div",{class:t,hidden:!0},x("button",{class:"tabbed-button",tabIndex:-1,"aria-hidden":"true"}))}function An(e){return x("div",{class:"md-typeset__scrollwrap"},x("div",{class:"md-typeset__table"},e))}function Ra(e){var o;let t=xe(),r=new URL(`../${e.version}/`,t.base);return x("li",{class:"md-version__item"},x("a",{href:`${r}`,class:"md-version__link"},e.title,((o=t.version)==null?void 0:o.alias)&&e.aliases.length>0&&x("span",{class:"md-version__alias"},e.aliases[0])))}function Cn(e,t){var o;let r=xe();return e=e.filter(n=>{var i;return!((i=n.properties)!=null&&i.hidden)}),x("div",{class:"md-version"},x("button",{class:"md-version__current","aria-label":Ee("select.version")},t.title,((o=r.version)==null?void 0:o.alias)&&t.aliases.length>0&&x("span",{class:"md-version__alias"},t.aliases[0])),x("ul",{class:"md-version__list"},e.map(Ra)))}var Ia=0;function ja(e){let 
t=z([et(e),$t(e)]).pipe(m(([o,n])=>o||n),K()),r=C(()=>Zo(e)).pipe(ne(Ne),pt(1),He(t),m(()=>en(e)));return t.pipe(Ae(o=>o),v(()=>z([t,r])),m(([o,n])=>({active:o,offset:n})),pe())}function Fa(e,t){let{content$:r,viewport$:o}=t,n=`__tooltip2_${Ia++}`;return C(()=>{let i=new g,a=new _r(!1);i.pipe(Z(),ie(!1)).subscribe(a);let s=a.pipe(Ht(c=>Le(+!c*250,kr)),K(),v(c=>c?r:S),w(c=>c.id=n),pe());z([i.pipe(m(({active:c})=>c)),s.pipe(v(c=>$t(c,250)),Q(!1))]).pipe(m(c=>c.some(l=>l))).subscribe(a);let p=a.pipe(b(c=>c),re(s,o),m(([c,l,{size:f}])=>{let u=e.getBoundingClientRect(),d=u.width/2;if(l.role==="tooltip")return{x:d,y:8+u.height};if(u.y>=f.height/2){let{height:y}=ce(l);return{x:d,y:-16-y}}else return{x:d,y:16+u.height}}));return z([s,i,p]).subscribe(([c,{offset:l},f])=>{c.style.setProperty("--md-tooltip-host-x",`${l.x}px`),c.style.setProperty("--md-tooltip-host-y",`${l.y}px`),c.style.setProperty("--md-tooltip-x",`${f.x}px`),c.style.setProperty("--md-tooltip-y",`${f.y}px`),c.classList.toggle("md-tooltip2--top",f.y<0),c.classList.toggle("md-tooltip2--bottom",f.y>=0)}),a.pipe(b(c=>c),re(s,(c,l)=>l),b(c=>c.role==="tooltip")).subscribe(c=>{let l=ce(R(":scope > *",c));c.style.setProperty("--md-tooltip-width",`${l.width}px`),c.style.setProperty("--md-tooltip-tail","0px")}),a.pipe(K(),ve(me),re(s)).subscribe(([c,l])=>{l.classList.toggle("md-tooltip2--active",c)}),z([a.pipe(b(c=>c)),s]).subscribe(([c,l])=>{l.role==="dialog"?(e.setAttribute("aria-controls",n),e.setAttribute("aria-haspopup","dialog")):e.setAttribute("aria-describedby",n)}),a.pipe(b(c=>!c)).subscribe(()=>{e.removeAttribute("aria-controls"),e.removeAttribute("aria-describedby"),e.removeAttribute("aria-haspopup")}),ja(e).pipe(w(c=>i.next(c)),_(()=>i.complete()),m(c=>$({ref:e},c)))})}function mt(e,{viewport$:t},r=document.body){return Fa(e,{content$:new j(o=>{let n=e.title,i=wn(n);return o.next(i),e.removeAttribute("title"),r.append(i),()=>{i.remove(),e.setAttribute("title",n)}}),viewport$:t})}function Ua(e,t){let 
r=C(()=>z([tn(e),Ne(t)])).pipe(m(([{x:o,y:n},i])=>{let{width:a,height:s}=ce(e);return{x:o-i.x+a/2,y:n-i.y+s/2}}));return et(e).pipe(v(o=>r.pipe(m(n=>({active:o,offset:n})),Te(+!o||1/0))))}function kn(e,t,{target$:r}){let[o,n]=Array.from(e.children);return C(()=>{let i=new g,a=i.pipe(Z(),ie(!0));return i.subscribe({next({offset:s}){e.style.setProperty("--md-tooltip-x",`${s.x}px`),e.style.setProperty("--md-tooltip-y",`${s.y}px`)},complete(){e.style.removeProperty("--md-tooltip-x"),e.style.removeProperty("--md-tooltip-y")}}),tt(e).pipe(W(a)).subscribe(s=>{e.toggleAttribute("data-md-visible",s)}),O(i.pipe(b(({active:s})=>s)),i.pipe(_e(250),b(({active:s})=>!s))).subscribe({next({active:s}){s?e.prepend(o):o.remove()},complete(){e.prepend(o)}}),i.pipe(Me(16,me)).subscribe(({active:s})=>{o.classList.toggle("md-tooltip--active",s)}),i.pipe(pt(125,me),b(()=>!!e.offsetParent),m(()=>e.offsetParent.getBoundingClientRect()),m(({x:s})=>s)).subscribe({next(s){s?e.style.setProperty("--md-tooltip-0",`${-s}px`):e.style.removeProperty("--md-tooltip-0")},complete(){e.style.removeProperty("--md-tooltip-0")}}),h(n,"click").pipe(W(a),b(s=>!(s.metaKey||s.ctrlKey))).subscribe(s=>{s.stopPropagation(),s.preventDefault()}),h(n,"mousedown").pipe(W(a),re(i)).subscribe(([s,{active:p}])=>{var c;if(s.button!==0||s.metaKey||s.ctrlKey)s.preventDefault();else if(p){s.preventDefault();let l=e.parentElement.closest(".md-annotation");l instanceof HTMLElement?l.focus():(c=Ie())==null||c.blur()}}),r.pipe(W(a),b(s=>s===o),Ge(125)).subscribe(()=>e.focus()),Ua(e,t).pipe(w(s=>i.next(s)),_(()=>i.complete()),m(s=>$({ref:e},s)))})}function Wa(e){return e.tagName==="CODE"?P(".c, .c1, .cm",e):[e]}function Da(e){let t=[];for(let r of Wa(e)){let o=[],n=document.createNodeIterator(r,NodeFilter.SHOW_TEXT);for(let i=n.nextNode();i;i=n.nextNode())o.push(i);for(let i of o){let a;for(;a=/(\(\d+\))(!)?/.exec(i.textContent);){let[,s,p]=a;if(typeof p=="undefined"){let 
c=i.splitText(a.index);i=c.splitText(s.length),t.push(c)}else{i.textContent=s,t.push(i);break}}}}return t}function Hn(e,t){t.append(...Array.from(e.childNodes))}function fr(e,t,{target$:r,print$:o}){let n=t.closest("[id]"),i=n==null?void 0:n.id,a=new Map;for(let s of Da(t)){let[,p]=s.textContent.match(/\((\d+)\)/);fe(`:scope > li:nth-child(${p})`,e)&&(a.set(p,Tn(p,i)),s.replaceWith(a.get(p)))}return a.size===0?S:C(()=>{let s=new g,p=s.pipe(Z(),ie(!0)),c=[];for(let[l,f]of a)c.push([R(".md-typeset",f),R(`:scope > li:nth-child(${l})`,e)]);return o.pipe(W(p)).subscribe(l=>{e.hidden=!l,e.classList.toggle("md-annotation-list",l);for(let[f,u]of c)l?Hn(f,u):Hn(u,f)}),O(...[...a].map(([,l])=>kn(l,t,{target$:r}))).pipe(_(()=>s.complete()),pe())})}function $n(e){if(e.nextElementSibling){let t=e.nextElementSibling;if(t.tagName==="OL")return t;if(t.tagName==="P"&&!t.children.length)return $n(t)}}function Pn(e,t){return C(()=>{let r=$n(e);return typeof r!="undefined"?fr(r,e,t):S})}var Rn=Mt(Br());var Va=0;function In(e){if(e.nextElementSibling){let t=e.nextElementSibling;if(t.tagName==="OL")return t;if(t.tagName==="P"&&!t.children.length)return In(t)}}function Na(e){return ge(e).pipe(m(({width:t})=>({scrollable:St(e).width>t})),ee("scrollable"))}function jn(e,t){let{matches:r}=matchMedia("(hover)"),o=C(()=>{let n=new g,i=n.pipe(jr(1));n.subscribe(({scrollable:c})=>{c&&r?e.setAttribute("tabindex","0"):e.removeAttribute("tabindex")});let a=[];if(Rn.default.isSupported()&&(e.closest(".copy")||B("content.code.copy")&&!e.closest(".no-copy"))){let c=e.closest("pre");c.id=`__code_${Va++}`;let l=Sn(c.id);c.insertBefore(l,e),B("content.tooltips")&&a.push(mt(l,{viewport$}))}let s=e.closest(".highlight");if(s instanceof HTMLElement){let c=In(s);if(typeof c!="undefined"&&(s.classList.contains("annotate")||B("content.code.annotate"))){let l=fr(c,e,t);a.push(ge(s).pipe(W(i),m(({width:f,height:u})=>f&&u),K(),v(f=>f?l:S)))}}return P(":scope > 
span[id]",e).length&&e.classList.add("md-code__content"),Na(e).pipe(w(c=>n.next(c)),_(()=>n.complete()),m(c=>$({ref:e},c)),Re(...a))});return B("content.lazy")?tt(e).pipe(b(n=>n),Te(1),v(()=>o)):o}function za(e,{target$:t,print$:r}){let o=!0;return O(t.pipe(m(n=>n.closest("details:not([open])")),b(n=>e===n),m(()=>({action:"open",reveal:!0}))),r.pipe(b(n=>n||!o),w(()=>o=e.open),m(n=>({action:n?"open":"close"}))))}function Fn(e,t){return C(()=>{let r=new g;return r.subscribe(({action:o,reveal:n})=>{e.toggleAttribute("open",o==="open"),n&&e.scrollIntoView()}),za(e,t).pipe(w(o=>r.next(o)),_(()=>r.complete()),m(o=>$({ref:e},o)))})}var Un=".node circle,.node ellipse,.node path,.node polygon,.node rect{fill:var(--md-mermaid-node-bg-color);stroke:var(--md-mermaid-node-fg-color)}marker{fill:var(--md-mermaid-edge-color)!important}.edgeLabel .label rect{fill:#0000}.label{color:var(--md-mermaid-label-fg-color);font-family:var(--md-mermaid-font-family)}.label foreignObject{line-height:normal;overflow:visible}.label div .edgeLabel{color:var(--md-mermaid-label-fg-color)}.edgeLabel,.edgeLabel p,.label div .edgeLabel{background-color:var(--md-mermaid-label-bg-color)}.edgeLabel,.edgeLabel p{fill:var(--md-mermaid-label-bg-color);color:var(--md-mermaid-edge-color)}.edgePath .path,.flowchart-link{stroke:var(--md-mermaid-edge-color);stroke-width:.05rem}.edgePath .arrowheadPath{fill:var(--md-mermaid-edge-color);stroke:none}.cluster rect{fill:var(--md-default-fg-color--lightest);stroke:var(--md-default-fg-color--lighter)}.cluster span{color:var(--md-mermaid-label-fg-color);font-family:var(--md-mermaid-font-family)}g #flowchart-circleEnd,g #flowchart-circleStart,g #flowchart-crossEnd,g #flowchart-crossStart,g #flowchart-pointEnd,g #flowchart-pointStart{stroke:none}g.classGroup line,g.classGroup rect{fill:var(--md-mermaid-node-bg-color);stroke:var(--md-mermaid-node-fg-color)}g.classGroup text{fill:var(--md-mermaid-label-fg-color);font-family:var(--md-mermaid-font-family)}.classLabel 
.box{fill:var(--md-mermaid-label-bg-color);background-color:var(--md-mermaid-label-bg-color);opacity:1}.classLabel .label{fill:var(--md-mermaid-label-fg-color);font-family:var(--md-mermaid-font-family)}.node .divider{stroke:var(--md-mermaid-node-fg-color)}.relation{stroke:var(--md-mermaid-edge-color)}.cardinality{fill:var(--md-mermaid-label-fg-color);font-family:var(--md-mermaid-font-family)}.cardinality text{fill:inherit!important}defs #classDiagram-compositionEnd,defs #classDiagram-compositionStart,defs #classDiagram-dependencyEnd,defs #classDiagram-dependencyStart,defs #classDiagram-extensionEnd,defs #classDiagram-extensionStart{fill:var(--md-mermaid-edge-color)!important;stroke:var(--md-mermaid-edge-color)!important}defs #classDiagram-aggregationEnd,defs #classDiagram-aggregationStart{fill:var(--md-mermaid-label-bg-color)!important;stroke:var(--md-mermaid-edge-color)!important}g.stateGroup rect{fill:var(--md-mermaid-node-bg-color);stroke:var(--md-mermaid-node-fg-color)}g.stateGroup .state-title{fill:var(--md-mermaid-label-fg-color)!important;font-family:var(--md-mermaid-font-family)}g.stateGroup .composit{fill:var(--md-mermaid-label-bg-color)}.nodeLabel,.nodeLabel p{color:var(--md-mermaid-label-fg-color);font-family:var(--md-mermaid-font-family)}a .nodeLabel{text-decoration:underline}.node circle.state-end,.node circle.state-start,.start-state{fill:var(--md-mermaid-edge-color);stroke:none}.end-state-inner,.end-state-outer{fill:var(--md-mermaid-edge-color)}.end-state-inner,.node circle.state-end{stroke:var(--md-mermaid-label-bg-color)}.transition{stroke:var(--md-mermaid-edge-color)}[id^=state-fork] rect,[id^=state-join] rect{fill:var(--md-mermaid-edge-color)!important;stroke:none!important}.statediagram-cluster.statediagram-cluster .inner{fill:var(--md-default-bg-color)}.statediagram-cluster rect{fill:var(--md-mermaid-node-bg-color);stroke:var(--md-mermaid-node-fg-color)}.statediagram-state 
rect.divider{fill:var(--md-default-fg-color--lightest);stroke:var(--md-default-fg-color--lighter)}defs #statediagram-barbEnd{stroke:var(--md-mermaid-edge-color)}.attributeBoxEven,.attributeBoxOdd{fill:var(--md-mermaid-node-bg-color);stroke:var(--md-mermaid-node-fg-color)}.entityBox{fill:var(--md-mermaid-label-bg-color);stroke:var(--md-mermaid-node-fg-color)}.entityLabel{fill:var(--md-mermaid-label-fg-color);font-family:var(--md-mermaid-font-family)}.relationshipLabelBox{fill:var(--md-mermaid-label-bg-color);fill-opacity:1;background-color:var(--md-mermaid-label-bg-color);opacity:1}.relationshipLabel{fill:var(--md-mermaid-label-fg-color)}.relationshipLine{stroke:var(--md-mermaid-edge-color)}defs #ONE_OR_MORE_END *,defs #ONE_OR_MORE_START *,defs #ONLY_ONE_END *,defs #ONLY_ONE_START *,defs #ZERO_OR_MORE_END *,defs #ZERO_OR_MORE_START *,defs #ZERO_OR_ONE_END *,defs #ZERO_OR_ONE_START *{stroke:var(--md-mermaid-edge-color)!important}defs #ZERO_OR_MORE_END circle,defs #ZERO_OR_MORE_START circle{fill:var(--md-mermaid-label-bg-color)}.actor{fill:var(--md-mermaid-sequence-actor-bg-color);stroke:var(--md-mermaid-sequence-actor-border-color)}text.actor>tspan{fill:var(--md-mermaid-sequence-actor-fg-color);font-family:var(--md-mermaid-font-family)}line{stroke:var(--md-mermaid-sequence-actor-line-color)}.actor-man circle,.actor-man line{fill:var(--md-mermaid-sequence-actorman-bg-color);stroke:var(--md-mermaid-sequence-actorman-line-color)}.messageLine0,.messageLine1{stroke:var(--md-mermaid-sequence-message-line-color)}.note{fill:var(--md-mermaid-sequence-note-bg-color);stroke:var(--md-mermaid-sequence-note-border-color)}.loopText,.loopText>tspan,.messageText,.noteText>tspan{stroke:none;font-family:var(--md-mermaid-font-family)!important}.messageText{fill:var(--md-mermaid-sequence-message-fg-color)}.loopText,.loopText>tspan{fill:var(--md-mermaid-sequence-loop-fg-color)}.noteText>tspan{fill:var(--md-mermaid-sequence-note-fg-color)}#arrowhead 
path{fill:var(--md-mermaid-sequence-message-line-color);stroke:none}.loopLine{fill:var(--md-mermaid-sequence-loop-bg-color);stroke:var(--md-mermaid-sequence-loop-border-color)}.labelBox{fill:var(--md-mermaid-sequence-label-bg-color);stroke:none}.labelText,.labelText>span{fill:var(--md-mermaid-sequence-label-fg-color);font-family:var(--md-mermaid-font-family)}.sequenceNumber{fill:var(--md-mermaid-sequence-number-fg-color)}rect.rect{fill:var(--md-mermaid-sequence-box-bg-color);stroke:none}rect.rect+text.text{fill:var(--md-mermaid-sequence-box-fg-color)}defs #sequencenumber{fill:var(--md-mermaid-sequence-number-bg-color)!important}";var Gr,Qa=0;function Ka(){return typeof mermaid=="undefined"||mermaid instanceof Element?Tt("https://unpkg.com/mermaid@11/dist/mermaid.min.js"):I(void 0)}function Wn(e){return e.classList.remove("mermaid"),Gr||(Gr=Ka().pipe(w(()=>mermaid.initialize({startOnLoad:!1,themeCSS:Un,sequence:{actorFontSize:"16px",messageFontSize:"16px",noteFontSize:"16px"}})),m(()=>{}),G(1))),Gr.subscribe(()=>co(this,null,function*(){e.classList.add("mermaid");let t=`__mermaid_${Qa++}`,r=x("div",{class:"mermaid"}),o=e.textContent,{svg:n,fn:i}=yield mermaid.render(t,o),a=r.attachShadow({mode:"closed"});a.innerHTML=n,e.replaceWith(r),i==null||i(a)})),Gr.pipe(m(()=>({ref:e})))}var Dn=x("table");function Vn(e){return e.replaceWith(Dn),Dn.replaceWith(An(e)),I({ref:e})}function Ya(e){let t=e.find(r=>r.checked)||e[0];return O(...e.map(r=>h(r,"change").pipe(m(()=>R(`label[for="${r.id}"]`))))).pipe(Q(R(`label[for="${t.id}"]`)),m(r=>({active:r})))}function Nn(e,{viewport$:t,target$:r}){let o=R(".tabbed-labels",e),n=P(":scope > input",e),i=Kr("prev");e.append(i);let a=Kr("next");return e.append(a),C(()=>{let s=new g,p=s.pipe(Z(),ie(!0));z([s,ge(e),tt(e)]).pipe(W(p),Me(1,me)).subscribe({next([{active:c},l]){let f=Ve(c),{width:u}=ce(c);e.style.setProperty("--md-indicator-x",`${f.x}px`),e.style.setProperty("--md-indicator-width",`${u}px`);let 
d=pr(o);(f.xd.x+l.width)&&o.scrollTo({left:Math.max(0,f.x-16),behavior:"smooth"})},complete(){e.style.removeProperty("--md-indicator-x"),e.style.removeProperty("--md-indicator-width")}}),z([Ne(o),ge(o)]).pipe(W(p)).subscribe(([c,l])=>{let f=St(o);i.hidden=c.x<16,a.hidden=c.x>f.width-l.width-16}),O(h(i,"click").pipe(m(()=>-1)),h(a,"click").pipe(m(()=>1))).pipe(W(p)).subscribe(c=>{let{width:l}=ce(o);o.scrollBy({left:l*c,behavior:"smooth"})}),r.pipe(W(p),b(c=>n.includes(c))).subscribe(c=>c.click()),o.classList.add("tabbed-labels--linked");for(let c of n){let l=R(`label[for="${c.id}"]`);l.replaceChildren(x("a",{href:`#${l.htmlFor}`,tabIndex:-1},...Array.from(l.childNodes))),h(l.firstElementChild,"click").pipe(W(p),b(f=>!(f.metaKey||f.ctrlKey)),w(f=>{f.preventDefault(),f.stopPropagation()})).subscribe(()=>{history.replaceState({},"",`#${l.htmlFor}`),l.click()})}return B("content.tabs.link")&&s.pipe(Ce(1),re(t)).subscribe(([{active:c},{offset:l}])=>{let f=c.innerText.trim();if(c.hasAttribute("data-md-switching"))c.removeAttribute("data-md-switching");else{let u=e.offsetTop-l.y;for(let y of P("[data-tabs]"))for(let L of P(":scope > input",y)){let X=R(`label[for="${L.id}"]`);if(X!==c&&X.innerText.trim()===f){X.setAttribute("data-md-switching",""),L.click();break}}window.scrollTo({top:e.offsetTop-u});let d=__md_get("__tabs")||[];__md_set("__tabs",[...new Set([f,...d])])}}),s.pipe(W(p)).subscribe(()=>{for(let c of P("audio, video",e))c.pause()}),Ya(n).pipe(w(c=>s.next(c)),_(()=>s.complete()),m(c=>$({ref:e},c)))}).pipe(Ke(se))}function zn(e,{viewport$:t,target$:r,print$:o}){return O(...P(".annotate:not(.highlight)",e).map(n=>Pn(n,{target$:r,print$:o})),...P("pre:not(.mermaid) > 
code",e).map(n=>jn(n,{target$:r,print$:o})),...P("pre.mermaid",e).map(n=>Wn(n)),...P("table:not([class])",e).map(n=>Vn(n)),...P("details",e).map(n=>Fn(n,{target$:r,print$:o})),...P("[data-tabs]",e).map(n=>Nn(n,{viewport$:t,target$:r})),...P("[title]",e).filter(()=>B("content.tooltips")).map(n=>mt(n,{viewport$:t})))}function Ba(e,{alert$:t}){return t.pipe(v(r=>O(I(!0),I(!1).pipe(Ge(2e3))).pipe(m(o=>({message:r,active:o})))))}function qn(e,t){let r=R(".md-typeset",e);return C(()=>{let o=new g;return o.subscribe(({message:n,active:i})=>{e.classList.toggle("md-dialog--active",i),r.textContent=n}),Ba(e,t).pipe(w(n=>o.next(n)),_(()=>o.complete()),m(n=>$({ref:e},n)))})}var Ga=0;function Ja(e,t){document.body.append(e);let{width:r}=ce(e);e.style.setProperty("--md-tooltip-width",`${r}px`),e.remove();let o=cr(t),n=typeof o!="undefined"?Ne(o):I({x:0,y:0}),i=O(et(t),$t(t)).pipe(K());return z([i,n]).pipe(m(([a,s])=>{let{x:p,y:c}=Ve(t),l=ce(t),f=t.closest("table");return f&&t.parentElement&&(p+=f.offsetLeft+t.parentElement.offsetLeft,c+=f.offsetTop+t.parentElement.offsetTop),{active:a,offset:{x:p-s.x+l.width/2-r/2,y:c-s.y+l.height+8}}}))}function Qn(e){let t=e.title;if(!t.length)return S;let r=`__tooltip_${Ga++}`,o=Rt(r,"inline"),n=R(".md-typeset",o);return n.innerHTML=t,C(()=>{let i=new g;return 
i.subscribe({next({offset:a}){o.style.setProperty("--md-tooltip-x",`${a.x}px`),o.style.setProperty("--md-tooltip-y",`${a.y}px`)},complete(){o.style.removeProperty("--md-tooltip-x"),o.style.removeProperty("--md-tooltip-y")}}),O(i.pipe(b(({active:a})=>a)),i.pipe(_e(250),b(({active:a})=>!a))).subscribe({next({active:a}){a?(e.insertAdjacentElement("afterend",o),e.setAttribute("aria-describedby",r),e.removeAttribute("title")):(o.remove(),e.removeAttribute("aria-describedby"),e.setAttribute("title",t))},complete(){o.remove(),e.removeAttribute("aria-describedby"),e.setAttribute("title",t)}}),i.pipe(Me(16,me)).subscribe(({active:a})=>{o.classList.toggle("md-tooltip--active",a)}),i.pipe(pt(125,me),b(()=>!!e.offsetParent),m(()=>e.offsetParent.getBoundingClientRect()),m(({x:a})=>a)).subscribe({next(a){a?o.style.setProperty("--md-tooltip-0",`${-a}px`):o.style.removeProperty("--md-tooltip-0")},complete(){o.style.removeProperty("--md-tooltip-0")}}),Ja(o,e).pipe(w(a=>i.next(a)),_(()=>i.complete()),m(a=>$({ref:e},a)))}).pipe(Ke(se))}function Xa({viewport$:e}){if(!B("header.autohide"))return I(!1);let t=e.pipe(m(({offset:{y:n}})=>n),Be(2,1),m(([n,i])=>[nMath.abs(i-n.y)>100),m(([,[n]])=>n),K()),o=ze("search");return z([e,o]).pipe(m(([{offset:n},i])=>n.y>400&&!i),K(),v(n=>n?r:I(!1)),Q(!1))}function Kn(e,t){return C(()=>z([ge(e),Xa(t)])).pipe(m(([{height:r},o])=>({height:r,hidden:o})),K((r,o)=>r.height===o.height&&r.hidden===o.hidden),G(1))}function Yn(e,{header$:t,main$:r}){return C(()=>{let o=new g,n=o.pipe(Z(),ie(!0));o.pipe(ee("active"),He(t)).subscribe(([{active:a},{hidden:s}])=>{e.classList.toggle("md-header--shadow",a&&!s),e.hidden=s});let i=ue(P("[title]",e)).pipe(b(()=>B("content.tooltips")),ne(a=>Qn(a)));return r.subscribe(o),t.pipe(W(n),m(a=>$({ref:e},a)),Re(i.pipe(W(n))))})}function Za(e,{viewport$:t,header$:r}){return mr(e,{viewport$:t,header$:r}).pipe(m(({offset:{y:o}})=>{let{height:n}=ce(e);return{active:o>=n}}),ee("active"))}function Bn(e,t){return C(()=>{let r=new 
g;r.subscribe({next({active:n}){e.classList.toggle("md-header__title--active",n)},complete(){e.classList.remove("md-header__title--active")}});let o=fe(".md-content h1");return typeof o=="undefined"?S:Za(o,t).pipe(w(n=>r.next(n)),_(()=>r.complete()),m(n=>$({ref:e},n)))})}function Gn(e,{viewport$:t,header$:r}){let o=r.pipe(m(({height:i})=>i),K()),n=o.pipe(v(()=>ge(e).pipe(m(({height:i})=>({top:e.offsetTop,bottom:e.offsetTop+i})),ee("bottom"))));return z([o,n,t]).pipe(m(([i,{top:a,bottom:s},{offset:{y:p},size:{height:c}}])=>(c=Math.max(0,c-Math.max(0,a-p,i)-Math.max(0,c+p-s)),{offset:a-i,height:c,active:a-i<=p})),K((i,a)=>i.offset===a.offset&&i.height===a.height&&i.active===a.active))}function es(e){let t=__md_get("__palette")||{index:e.findIndex(o=>matchMedia(o.getAttribute("data-md-color-media")).matches)},r=Math.max(0,Math.min(t.index,e.length-1));return I(...e).pipe(ne(o=>h(o,"change").pipe(m(()=>o))),Q(e[r]),m(o=>({index:e.indexOf(o),color:{media:o.getAttribute("data-md-color-media"),scheme:o.getAttribute("data-md-color-scheme"),primary:o.getAttribute("data-md-color-primary"),accent:o.getAttribute("data-md-color-accent")}})),G(1))}function Jn(e){let t=P("input",e),r=x("meta",{name:"theme-color"});document.head.appendChild(r);let o=x("meta",{name:"color-scheme"});document.head.appendChild(o);let n=Pt("(prefers-color-scheme: light)");return C(()=>{let i=new g;return i.subscribe(a=>{if(document.body.setAttribute("data-md-color-switching",""),a.color.media==="(prefers-color-scheme)"){let s=matchMedia("(prefers-color-scheme: light)"),p=document.querySelector(s.matches?"[data-md-color-media='(prefers-color-scheme: light)']":"[data-md-color-media='(prefers-color-scheme: dark)']");a.color.scheme=p.getAttribute("data-md-color-scheme"),a.color.primary=p.getAttribute("data-md-color-primary"),a.color.accent=p.getAttribute("data-md-color-accent")}for(let[s,p]of Object.entries(a.color))document.body.setAttribute(`data-md-color-${s}`,p);for(let 
s=0;sa.key==="Enter"),re(i,(a,s)=>s)).subscribe(({index:a})=>{a=(a+1)%t.length,t[a].click(),t[a].focus()}),i.pipe(m(()=>{let a=Se("header"),s=window.getComputedStyle(a);return o.content=s.colorScheme,s.backgroundColor.match(/\d+/g).map(p=>(+p).toString(16).padStart(2,"0")).join("")})).subscribe(a=>r.content=`#${a}`),i.pipe(ve(se)).subscribe(()=>{document.body.removeAttribute("data-md-color-switching")}),es(t).pipe(W(n.pipe(Ce(1))),ct(),w(a=>i.next(a)),_(()=>i.complete()),m(a=>$({ref:e},a)))})}function Xn(e,{progress$:t}){return C(()=>{let r=new g;return r.subscribe(({value:o})=>{e.style.setProperty("--md-progress-value",`${o}`)}),t.pipe(w(o=>r.next({value:o})),_(()=>r.complete()),m(o=>({ref:e,value:o})))})}var Jr=Mt(Br());function ts(e){e.setAttribute("data-md-copying","");let t=e.closest("[data-copy]"),r=t?t.getAttribute("data-copy"):e.innerText;return e.removeAttribute("data-md-copying"),r.trimEnd()}function Zn({alert$:e}){Jr.default.isSupported()&&new j(t=>{new Jr.default("[data-clipboard-target], [data-clipboard-text]",{text:r=>r.getAttribute("data-clipboard-text")||ts(R(r.getAttribute("data-clipboard-target")))}).on("success",r=>t.next(r))}).pipe(w(t=>{t.trigger.focus()}),m(()=>Ee("clipboard.copied"))).subscribe(e)}function ei(e,t){return e.protocol=t.protocol,e.hostname=t.hostname,e}function rs(e,t){let r=new Map;for(let o of P("url",e)){let n=R("loc",o),i=[ei(new URL(n.textContent),t)];r.set(`${i[0]}`,i);for(let a of P("[rel=alternate]",o)){let s=a.getAttribute("href");s!=null&&i.push(ei(new URL(s),t))}}return r}function ur(e){return un(new URL("sitemap.xml",e)).pipe(m(t=>rs(t,new URL(e))),de(()=>I(new Map)))}function os(e,t){if(!(e.target instanceof Element))return S;let r=e.target.closest("a");if(r===null)return S;if(r.target||e.metaKey||e.ctrlKey)return S;let o=new URL(r.href);return o.search=o.hash="",t.has(`${o}`)?(e.preventDefault(),I(new URL(r.href))):S}function ti(e){let t=new Map;for(let r of P(":scope > *",e.head))t.set(r.outerHTML,r);return 
t}function ri(e){for(let t of P("[href], [src]",e))for(let r of["href","src"]){let o=t.getAttribute(r);if(o&&!/^(?:[a-z]+:)?\/\//i.test(o)){t[r]=t[r];break}}return I(e)}function ns(e){for(let o of["[data-md-component=announce]","[data-md-component=container]","[data-md-component=header-topic]","[data-md-component=outdated]","[data-md-component=logo]","[data-md-component=skip]",...B("navigation.tabs.sticky")?["[data-md-component=tabs]"]:[]]){let n=fe(o),i=fe(o,e);typeof n!="undefined"&&typeof i!="undefined"&&n.replaceWith(i)}let t=ti(document);for(let[o,n]of ti(e))t.has(o)?t.delete(o):document.head.appendChild(n);for(let o of t.values()){let n=o.getAttribute("name");n!=="theme-color"&&n!=="color-scheme"&&o.remove()}let r=Se("container");return We(P("script",r)).pipe(v(o=>{let n=e.createElement("script");if(o.src){for(let i of o.getAttributeNames())n.setAttribute(i,o.getAttribute(i));return o.replaceWith(n),new j(i=>{n.onload=()=>i.complete()})}else return n.textContent=o.textContent,o.replaceWith(n),S}),Z(),ie(document))}function oi({location$:e,viewport$:t,progress$:r}){let o=xe();if(location.protocol==="file:")return S;let n=ur(o.base);I(document).subscribe(ri);let i=h(document.body,"click").pipe(He(n),v(([p,c])=>os(p,c)),pe()),a=h(window,"popstate").pipe(m(ye),pe());i.pipe(re(t)).subscribe(([p,{offset:c}])=>{history.replaceState(c,""),history.pushState(null,"",p)}),O(i,a).subscribe(e);let s=e.pipe(ee("pathname"),v(p=>fn(p,{progress$:r}).pipe(de(()=>(lt(p,!0),S)))),v(ri),v(ns),pe());return O(s.pipe(re(e,(p,c)=>c)),s.pipe(v(()=>e),ee("pathname"),v(()=>e),ee("hash")),e.pipe(K((p,c)=>p.pathname===c.pathname&&p.hash===c.hash),v(()=>i),w(()=>history.back()))).subscribe(p=>{var c,l;history.state!==null||!p.hash?window.scrollTo(0,(l=(c=history.state)==null?void 
0:c.y)!=null?l:0):(history.scrollRestoration="auto",pn(p.hash),history.scrollRestoration="manual")}),e.subscribe(()=>{history.scrollRestoration="manual"}),h(window,"beforeunload").subscribe(()=>{history.scrollRestoration="auto"}),t.pipe(ee("offset"),_e(100)).subscribe(({offset:p})=>{history.replaceState(p,"")}),s}var ni=Mt(qr());function ii(e){let t=e.separator.split("|").map(n=>n.replace(/(\(\?[!=<][^)]+\))/g,"").length===0?"\uFFFD":n).join("|"),r=new RegExp(t,"img"),o=(n,i,a)=>`${i}${a}`;return n=>{n=n.replace(/[\s*+\-:~^]+/g," ").trim();let i=new RegExp(`(^|${e.separator}|)(${n.replace(/[|\\{}()[\]^$+*?.-]/g,"\\$&").replace(r,"|")})`,"img");return a=>(0,ni.default)(a).replace(i,o).replace(/<\/mark>(\s+)]*>/img,"$1")}}function jt(e){return e.type===1}function dr(e){return e.type===3}function ai(e,t){let r=yn(e);return O(I(location.protocol!=="file:"),ze("search")).pipe(Ae(o=>o),v(()=>t)).subscribe(({config:o,docs:n})=>r.next({type:0,data:{config:o,docs:n,options:{suggest:B("search.suggest")}}})),r}function si(e){var l;let{selectedVersionSitemap:t,selectedVersionBaseURL:r,currentLocation:o,currentBaseURL:n}=e,i=(l=Xr(n))==null?void 0:l.pathname;if(i===void 0)return;let a=ss(o.pathname,i);if(a===void 0)return;let s=ps(t.keys());if(!t.has(s))return;let p=Xr(a,s);if(!p||!t.has(p.href))return;let c=Xr(a,r);if(c)return c.hash=o.hash,c.search=o.search,c}function Xr(e,t){try{return new URL(e,t)}catch(r){return}}function ss(e,t){if(e.startsWith(t))return e.slice(t.length)}function cs(e,t){let r=Math.min(e.length,t.length),o;for(o=0;oS)),o=r.pipe(m(n=>{let[,i]=t.base.match(/([^/]+)\/?$/);return n.find(({version:a,aliases:s})=>a===i||s.includes(i))||n[0]}));r.pipe(m(n=>new Map(n.map(i=>[`${new URL(`../${i.version}/`,t.base)}`,i]))),v(n=>h(document.body,"click").pipe(b(i=>!i.metaKey&&!i.ctrlKey),re(o),v(([i,a])=>{if(i.target instanceof Element){let s=i.target.closest("a");if(s&&!s.target&&n.has(s.href)){let 
p=s.href;return!i.target.closest(".md-version")&&n.get(p)===a?S:(i.preventDefault(),I(new URL(p)))}}return S}),v(i=>ur(i).pipe(m(a=>{var s;return(s=si({selectedVersionSitemap:a,selectedVersionBaseURL:i,currentLocation:ye(),currentBaseURL:t.base}))!=null?s:i})))))).subscribe(n=>lt(n,!0)),z([r,o]).subscribe(([n,i])=>{R(".md-header__topic").appendChild(Cn(n,i))}),e.pipe(v(()=>o)).subscribe(n=>{var a;let i=__md_get("__outdated",sessionStorage);if(i===null){i=!0;let s=((a=t.version)==null?void 0:a.default)||"latest";Array.isArray(s)||(s=[s]);e:for(let p of s)for(let c of n.aliases.concat(n.version))if(new RegExp(p,"i").test(c)){i=!1;break e}__md_set("__outdated",i,sessionStorage)}if(i)for(let s of ae("outdated"))s.hidden=!1})}function ls(e,{worker$:t}){let{searchParams:r}=ye();r.has("q")&&(Je("search",!0),e.value=r.get("q"),e.focus(),ze("search").pipe(Ae(i=>!i)).subscribe(()=>{let i=ye();i.searchParams.delete("q"),history.replaceState({},"",`${i}`)}));let o=et(e),n=O(t.pipe(Ae(jt)),h(e,"keyup"),o).pipe(m(()=>e.value),K());return z([n,o]).pipe(m(([i,a])=>({value:i,focus:a})),G(1))}function pi(e,{worker$:t}){let r=new g,o=r.pipe(Z(),ie(!0));z([t.pipe(Ae(jt)),r],(i,a)=>a).pipe(ee("value")).subscribe(({value:i})=>t.next({type:2,data:i})),r.pipe(ee("focus")).subscribe(({focus:i})=>{i&&Je("search",i)}),h(e.form,"reset").pipe(W(o)).subscribe(()=>e.focus());let n=R("header [for=__search]");return h(n,"click").subscribe(()=>e.focus()),ls(e,{worker$:t}).pipe(w(i=>r.next(i)),_(()=>r.complete()),m(i=>$({ref:e},i)),G(1))}function li(e,{worker$:t,query$:r}){let o=new g,n=on(e.parentElement).pipe(b(Boolean)),i=e.parentElement,a=R(":scope > :first-child",e),s=R(":scope > :last-child",e);ze("search").subscribe(l=>s.setAttribute("role",l?"list":"presentation")),o.pipe(re(r),Wr(t.pipe(Ae(jt)))).subscribe(([{items:l},{value:f}])=>{switch(l.length){case 0:a.textContent=f.length?Ee("search.result.none"):Ee("search.result.placeholder");break;case 
1:a.textContent=Ee("search.result.one");break;default:let u=sr(l.length);a.textContent=Ee("search.result.other",u)}});let p=o.pipe(w(()=>s.innerHTML=""),v(({items:l})=>O(I(...l.slice(0,10)),I(...l.slice(10)).pipe(Be(4),Vr(n),v(([f])=>f)))),m(Mn),pe());return p.subscribe(l=>s.appendChild(l)),p.pipe(ne(l=>{let f=fe("details",l);return typeof f=="undefined"?S:h(f,"toggle").pipe(W(o),m(()=>f))})).subscribe(l=>{l.open===!1&&l.offsetTop<=i.scrollTop&&i.scrollTo({top:l.offsetTop})}),t.pipe(b(dr),m(({data:l})=>l)).pipe(w(l=>o.next(l)),_(()=>o.complete()),m(l=>$({ref:e},l)))}function ms(e,{query$:t}){return t.pipe(m(({value:r})=>{let o=ye();return o.hash="",r=r.replace(/\s+/g,"+").replace(/&/g,"%26").replace(/=/g,"%3D"),o.search=`q=${r}`,{url:o}}))}function mi(e,t){let r=new g,o=r.pipe(Z(),ie(!0));return r.subscribe(({url:n})=>{e.setAttribute("data-clipboard-text",e.href),e.href=`${n}`}),h(e,"click").pipe(W(o)).subscribe(n=>n.preventDefault()),ms(e,t).pipe(w(n=>r.next(n)),_(()=>r.complete()),m(n=>$({ref:e},n)))}function fi(e,{worker$:t,keyboard$:r}){let o=new g,n=Se("search-query"),i=O(h(n,"keydown"),h(n,"focus")).pipe(ve(se),m(()=>n.value),K());return o.pipe(He(i),m(([{suggest:s},p])=>{let c=p.split(/([\s-]+)/);if(s!=null&&s.length&&c[c.length-1]){let l=s[s.length-1];l.startsWith(c[c.length-1])&&(c[c.length-1]=l)}else c.length=0;return c})).subscribe(s=>e.innerHTML=s.join("").replace(/\s/g," ")),r.pipe(b(({mode:s})=>s==="search")).subscribe(s=>{switch(s.type){case"ArrowRight":e.innerText.length&&n.selectionStart===n.value.length&&(n.value=e.innerText);break}}),t.pipe(b(dr),m(({data:s})=>s)).pipe(w(s=>o.next(s)),_(()=>o.complete()),m(()=>({ref:e})))}function ui(e,{index$:t,keyboard$:r}){let o=xe();try{let n=ai(o.search,t),i=Se("search-query",e),a=Se("search-result",e);h(e,"click").pipe(b(({target:p})=>p instanceof Element&&!!p.closest("a"))).subscribe(()=>Je("search",!1)),r.pipe(b(({mode:p})=>p==="search")).subscribe(p=>{let c=Ie();switch(p.type){case"Enter":if(c===i){let 
l=new Map;for(let f of P(":first-child [href]",a)){let u=f.firstElementChild;l.set(f,parseFloat(u.getAttribute("data-md-score")))}if(l.size){let[[f]]=[...l].sort(([,u],[,d])=>d-u);f.click()}p.claim()}break;case"Escape":case"Tab":Je("search",!1),i.blur();break;case"ArrowUp":case"ArrowDown":if(typeof c=="undefined")i.focus();else{let l=[i,...P(":not(details) > [href], summary, details[open] [href]",a)],f=Math.max(0,(Math.max(0,l.indexOf(c))+l.length+(p.type==="ArrowUp"?-1:1))%l.length);l[f].focus()}p.claim();break;default:i!==Ie()&&i.focus()}}),r.pipe(b(({mode:p})=>p==="global")).subscribe(p=>{switch(p.type){case"f":case"s":case"/":i.focus(),i.select(),p.claim();break}});let s=pi(i,{worker$:n});return O(s,li(a,{worker$:n,query$:s})).pipe(Re(...ae("search-share",e).map(p=>mi(p,{query$:s})),...ae("search-suggest",e).map(p=>fi(p,{worker$:n,keyboard$:r}))))}catch(n){return e.hidden=!0,Ye}}function di(e,{index$:t,location$:r}){return z([t,r.pipe(Q(ye()),b(o=>!!o.searchParams.get("h")))]).pipe(m(([o,n])=>ii(o.config)(n.searchParams.get("h"))),m(o=>{var a;let n=new Map,i=document.createNodeIterator(e,NodeFilter.SHOW_TEXT);for(let s=i.nextNode();s;s=i.nextNode())if((a=s.parentElement)!=null&&a.offsetHeight){let p=s.textContent,c=o(p);c.length>p.length&&n.set(s,c)}for(let[s,p]of n){let{childNodes:c}=x("span",null,p);s.replaceWith(...Array.from(c))}return{ref:e,nodes:n}}))}function fs(e,{viewport$:t,main$:r}){let o=e.closest(".md-grid"),n=o.offsetTop-o.parentElement.offsetTop;return z([r,t]).pipe(m(([{offset:i,height:a},{offset:{y:s}}])=>(a=a+Math.min(n,Math.max(0,s-i))-n,{height:a,locked:s>=i+n})),K((i,a)=>i.height===a.height&&i.locked===a.locked))}function Zr(e,o){var n=o,{header$:t}=n,r=so(n,["header$"]);let i=R(".md-sidebar__scrollwrap",e),{y:a}=Ve(i);return C(()=>{let s=new g,p=s.pipe(Z(),ie(!0)),c=s.pipe(Me(0,me));return 
c.pipe(re(t)).subscribe({next([{height:l},{height:f}]){i.style.height=`${l-2*a}px`,e.style.top=`${f}px`},complete(){i.style.height="",e.style.top=""}}),c.pipe(Ae()).subscribe(()=>{for(let l of P(".md-nav__link--active[href]",e)){if(!l.clientHeight)continue;let f=l.closest(".md-sidebar__scrollwrap");if(typeof f!="undefined"){let u=l.offsetTop-f.offsetTop,{height:d}=ce(f);f.scrollTo({top:u-d/2})}}}),ue(P("label[tabindex]",e)).pipe(ne(l=>h(l,"click").pipe(ve(se),m(()=>l),W(p)))).subscribe(l=>{let f=R(`[id="${l.htmlFor}"]`);R(`[aria-labelledby="${l.id}"]`).setAttribute("aria-expanded",`${f.checked}`)}),fs(e,r).pipe(w(l=>s.next(l)),_(()=>s.complete()),m(l=>$({ref:e},l)))})}function hi(e,t){if(typeof t!="undefined"){let r=`https://api.github.com/repos/${e}/${t}`;return st(je(`${r}/releases/latest`).pipe(de(()=>S),m(o=>({version:o.tag_name})),De({})),je(r).pipe(de(()=>S),m(o=>({stars:o.stargazers_count,forks:o.forks_count})),De({}))).pipe(m(([o,n])=>$($({},o),n)))}else{let r=`https://api.github.com/users/${e}`;return je(r).pipe(m(o=>({repositories:o.public_repos})),De({}))}}function bi(e,t){let r=`https://${e}/api/v4/projects/${encodeURIComponent(t)}`;return st(je(`${r}/releases/permalink/latest`).pipe(de(()=>S),m(({tag_name:o})=>({version:o})),De({})),je(r).pipe(de(()=>S),m(({star_count:o,forks_count:n})=>({stars:o,forks:n})),De({}))).pipe(m(([o,n])=>$($({},o),n)))}function vi(e){let t=e.match(/^.+github\.com\/([^/]+)\/?([^/]+)?/i);if(t){let[,r,o]=t;return hi(r,o)}if(t=e.match(/^.+?([^/]*gitlab[^/]+)\/(.+?)\/?$/i),t){let[,r,o]=t;return bi(r,o)}return S}var us;function ds(e){return us||(us=C(()=>{let t=__md_get("__source",sessionStorage);if(t)return I(t);if(ae("consent").length){let o=__md_get("__consent");if(!(o&&o.github))return S}return vi(e.href).pipe(w(o=>__md_set("__source",o,sessionStorage)))}).pipe(de(()=>S),b(t=>Object.keys(t).length>0),m(t=>({facts:t})),G(1)))}function gi(e){let t=R(":scope > :last-child",e);return C(()=>{let r=new g;return 
r.subscribe(({facts:o})=>{t.appendChild(_n(o)),t.classList.add("md-source__repository--active")}),ds(e).pipe(w(o=>r.next(o)),_(()=>r.complete()),m(o=>$({ref:e},o)))})}function hs(e,{viewport$:t,header$:r}){return ge(document.body).pipe(v(()=>mr(e,{header$:r,viewport$:t})),m(({offset:{y:o}})=>({hidden:o>=10})),ee("hidden"))}function yi(e,t){return C(()=>{let r=new g;return r.subscribe({next({hidden:o}){e.hidden=o},complete(){e.hidden=!1}}),(B("navigation.tabs.sticky")?I({hidden:!1}):hs(e,t)).pipe(w(o=>r.next(o)),_(()=>r.complete()),m(o=>$({ref:e},o)))})}function bs(e,{viewport$:t,header$:r}){let o=new Map,n=P(".md-nav__link",e);for(let s of n){let p=decodeURIComponent(s.hash.substring(1)),c=fe(`[id="${p}"]`);typeof c!="undefined"&&o.set(s,c)}let i=r.pipe(ee("height"),m(({height:s})=>{let p=Se("main"),c=R(":scope > :first-child",p);return s+.8*(c.offsetTop-p.offsetTop)}),pe());return ge(document.body).pipe(ee("height"),v(s=>C(()=>{let p=[];return I([...o].reduce((c,[l,f])=>{for(;p.length&&o.get(p[p.length-1]).tagName>=f.tagName;)p.pop();let u=f.offsetTop;for(;!u&&f.parentElement;)f=f.parentElement,u=f.offsetTop;let d=f.offsetParent;for(;d;d=d.offsetParent)u+=d.offsetTop;return c.set([...p=[...p,l]].reverse(),u)},new Map))}).pipe(m(p=>new Map([...p].sort(([,c],[,l])=>c-l))),He(i),v(([p,c])=>t.pipe(Fr(([l,f],{offset:{y:u},size:d})=>{let y=u+d.height>=Math.floor(s.height);for(;f.length;){let[,L]=f[0];if(L-c=u&&!y)f=[l.pop(),...f];else break}return[l,f]},[[],[...p]]),K((l,f)=>l[0]===f[0]&&l[1]===f[1])))))).pipe(m(([s,p])=>({prev:s.map(([c])=>c),next:p.map(([c])=>c)})),Q({prev:[],next:[]}),Be(2,1),m(([s,p])=>s.prev.length{let i=new g,a=i.pipe(Z(),ie(!0));if(i.subscribe(({prev:s,next:p})=>{for(let[c]of p)c.classList.remove("md-nav__link--passed"),c.classList.remove("md-nav__link--active");for(let[c,[l]]of s.entries())l.classList.add("md-nav__link--passed"),l.classList.toggle("md-nav__link--active",c===s.length-1)}),B("toc.follow")){let 
s=O(t.pipe(_e(1),m(()=>{})),t.pipe(_e(250),m(()=>"smooth")));i.pipe(b(({prev:p})=>p.length>0),He(o.pipe(ve(se))),re(s)).subscribe(([[{prev:p}],c])=>{let[l]=p[p.length-1];if(l.offsetHeight){let f=cr(l);if(typeof f!="undefined"){let u=l.offsetTop-f.offsetTop,{height:d}=ce(f);f.scrollTo({top:u-d/2,behavior:c})}}})}return B("navigation.tracking")&&t.pipe(W(a),ee("offset"),_e(250),Ce(1),W(n.pipe(Ce(1))),ct({delay:250}),re(i)).subscribe(([,{prev:s}])=>{let p=ye(),c=s[s.length-1];if(c&&c.length){let[l]=c,{hash:f}=new URL(l.href);p.hash!==f&&(p.hash=f,history.replaceState({},"",`${p}`))}else p.hash="",history.replaceState({},"",`${p}`)}),bs(e,{viewport$:t,header$:r}).pipe(w(s=>i.next(s)),_(()=>i.complete()),m(s=>$({ref:e},s)))})}function vs(e,{viewport$:t,main$:r,target$:o}){let n=t.pipe(m(({offset:{y:a}})=>a),Be(2,1),m(([a,s])=>a>s&&s>0),K()),i=r.pipe(m(({active:a})=>a));return z([i,n]).pipe(m(([a,s])=>!(a&&s)),K(),W(o.pipe(Ce(1))),ie(!0),ct({delay:250}),m(a=>({hidden:a})))}function Ei(e,{viewport$:t,header$:r,main$:o,target$:n}){let i=new g,a=i.pipe(Z(),ie(!0));return i.subscribe({next({hidden:s}){e.hidden=s,s?(e.setAttribute("tabindex","-1"),e.blur()):e.removeAttribute("tabindex")},complete(){e.style.top="",e.hidden=!0,e.removeAttribute("tabindex")}}),r.pipe(W(a),ee("height")).subscribe(({height:s})=>{e.style.top=`${s+16}px`}),h(e,"click").subscribe(s=>{s.preventDefault(),window.scrollTo({top:0})}),vs(e,{viewport$:t,main$:o,target$:n}).pipe(w(s=>i.next(s)),_(()=>i.complete()),m(s=>$({ref:e},s)))}function wi({document$:e,viewport$:t}){e.pipe(v(()=>P(".md-ellipsis")),ne(r=>tt(r).pipe(W(e.pipe(Ce(1))),b(o=>o),m(()=>r),Te(1))),b(r=>r.offsetWidth{let o=r.innerText,n=r.closest("a")||r;return n.title=o,B("content.tooltips")?mt(n,{viewport$:t}).pipe(W(e.pipe(Ce(1))),_(()=>n.removeAttribute("title"))):S})).subscribe(),B("content.tooltips")&&e.pipe(v(()=>P(".md-status")),ne(r=>mt(r,{viewport$:t}))).subscribe()}function 
Ti({document$:e,tablet$:t}){e.pipe(v(()=>P(".md-toggle--indeterminate")),w(r=>{r.indeterminate=!0,r.checked=!1}),ne(r=>h(r,"change").pipe(Dr(()=>r.classList.contains("md-toggle--indeterminate")),m(()=>r))),re(t)).subscribe(([r,o])=>{r.classList.remove("md-toggle--indeterminate"),o&&(r.checked=!1)})}function gs(){return/(iPad|iPhone|iPod)/.test(navigator.userAgent)}function Si({document$:e}){e.pipe(v(()=>P("[data-md-scrollfix]")),w(t=>t.removeAttribute("data-md-scrollfix")),b(gs),ne(t=>h(t,"touchstart").pipe(m(()=>t)))).subscribe(t=>{let r=t.scrollTop;r===0?t.scrollTop=1:r+t.offsetHeight===t.scrollHeight&&(t.scrollTop=r-1)})}function Oi({viewport$:e,tablet$:t}){z([ze("search"),t]).pipe(m(([r,o])=>r&&!o),v(r=>I(r).pipe(Ge(r?400:100))),re(e)).subscribe(([r,{offset:{y:o}}])=>{if(r)document.body.setAttribute("data-md-scrolllock",""),document.body.style.top=`-${o}px`;else{let n=-1*parseInt(document.body.style.top,10);document.body.removeAttribute("data-md-scrolllock"),document.body.style.top="",n&&window.scrollTo(0,n)}})}Object.entries||(Object.entries=function(e){let t=[];for(let r of Object.keys(e))t.push([r,e[r]]);return t});Object.values||(Object.values=function(e){let t=[];for(let r of Object.keys(e))t.push(e[r]);return t});typeof Element!="undefined"&&(Element.prototype.scrollTo||(Element.prototype.scrollTo=function(e,t){typeof e=="object"?(this.scrollLeft=e.left,this.scrollTop=e.top):(this.scrollLeft=e,this.scrollTop=t)}),Element.prototype.replaceWith||(Element.prototype.replaceWith=function(...e){let t=this.parentNode;if(t){e.length===0&&t.removeChild(this);for(let r=e.length-1;r>=0;r--){let o=e[r];typeof o=="string"?o=document.createTextNode(o):o.parentNode&&o.parentNode.removeChild(o),r?t.insertBefore(this.previousSibling,o):t.replaceChild(o,this)}}}));function ys(){return location.protocol==="file:"?Tt(`${new URL("search/search_index.js",eo.base)}`).pipe(m(()=>__index),G(1)):je(new 
URL("search/search_index.json",eo.base))}document.documentElement.classList.remove("no-js");document.documentElement.classList.add("js");var ot=Go(),Ut=sn(),Lt=ln(Ut),to=an(),Oe=gn(),hr=Pt("(min-width: 960px)"),Mi=Pt("(min-width: 1220px)"),_i=mn(),eo=xe(),Ai=document.forms.namedItem("search")?ys():Ye,ro=new g;Zn({alert$:ro});var oo=new g;B("navigation.instant")&&oi({location$:Ut,viewport$:Oe,progress$:oo}).subscribe(ot);var Li;((Li=eo.version)==null?void 0:Li.provider)==="mike"&&ci({document$:ot});O(Ut,Lt).pipe(Ge(125)).subscribe(()=>{Je("drawer",!1),Je("search",!1)});to.pipe(b(({mode:e})=>e==="global")).subscribe(e=>{switch(e.type){case"p":case",":let t=fe("link[rel=prev]");typeof t!="undefined"&<(t);break;case"n":case".":let r=fe("link[rel=next]");typeof r!="undefined"&<(r);break;case"Enter":let o=Ie();o instanceof HTMLLabelElement&&o.click()}});wi({viewport$:Oe,document$:ot});Ti({document$:ot,tablet$:hr});Si({document$:ot});Oi({viewport$:Oe,tablet$:hr});var rt=Kn(Se("header"),{viewport$:Oe}),Ft=ot.pipe(m(()=>Se("main")),v(e=>Gn(e,{viewport$:Oe,header$:rt})),G(1)),xs=O(...ae("consent").map(e=>En(e,{target$:Lt})),...ae("dialog").map(e=>qn(e,{alert$:ro})),...ae("header").map(e=>Yn(e,{viewport$:Oe,header$:rt,main$:Ft})),...ae("palette").map(e=>Jn(e)),...ae("progress").map(e=>Xn(e,{progress$:oo})),...ae("search").map(e=>ui(e,{index$:Ai,keyboard$:to})),...ae("source").map(e=>gi(e))),Es=C(()=>O(...ae("announce").map(e=>xn(e)),...ae("content").map(e=>zn(e,{viewport$:Oe,target$:Lt,print$:_i})),...ae("content").map(e=>B("search.highlight")?di(e,{index$:Ai,location$:Ut}):S),...ae("header-title").map(e=>Bn(e,{viewport$:Oe,header$:rt})),...ae("sidebar").map(e=>e.getAttribute("data-md-type")==="navigation"?Nr(Mi,()=>Zr(e,{viewport$:Oe,header$:rt,main$:Ft})):Nr(hr,()=>Zr(e,{viewport$:Oe,header$:rt,main$:Ft}))),...ae("tabs").map(e=>yi(e,{viewport$:Oe,header$:rt})),...ae("toc").map(e=>xi(e,{viewport$:Oe,header$:rt,main$:Ft,target$:Lt})),...ae("top").map(e=>Ei(e,{viewport$:Oe,head
er$:rt,main$:Ft,target$:Lt})))),Ci=ot.pipe(v(()=>Es),Re(xs),G(1));Ci.subscribe();window.document$=ot;window.location$=Ut;window.target$=Lt;window.keyboard$=to;window.viewport$=Oe;window.tablet$=hr;window.screen$=Mi;window.print$=_i;window.alert$=ro;window.progress$=oo;window.component$=Ci;})(); +//# sourceMappingURL=bundle.525ec568.min.js.map + diff --git a/docs/2.0.6/assets/javascripts/bundle.525ec568.min.js.map b/docs/2.0.6/assets/javascripts/bundle.525ec568.min.js.map new file mode 100644 index 0000000..ef5d8d3 --- /dev/null +++ b/docs/2.0.6/assets/javascripts/bundle.525ec568.min.js.map @@ -0,0 +1,7 @@ +{ + "version": 3, + "sources": ["node_modules/focus-visible/dist/focus-visible.js", "node_modules/escape-html/index.js", "node_modules/clipboard/dist/clipboard.js", "src/templates/assets/javascripts/bundle.ts", "node_modules/tslib/tslib.es6.mjs", "node_modules/rxjs/src/internal/util/isFunction.ts", "node_modules/rxjs/src/internal/util/createErrorClass.ts", "node_modules/rxjs/src/internal/util/UnsubscriptionError.ts", "node_modules/rxjs/src/internal/util/arrRemove.ts", "node_modules/rxjs/src/internal/Subscription.ts", "node_modules/rxjs/src/internal/config.ts", "node_modules/rxjs/src/internal/scheduler/timeoutProvider.ts", "node_modules/rxjs/src/internal/util/reportUnhandledError.ts", "node_modules/rxjs/src/internal/util/noop.ts", "node_modules/rxjs/src/internal/NotificationFactories.ts", "node_modules/rxjs/src/internal/util/errorContext.ts", "node_modules/rxjs/src/internal/Subscriber.ts", "node_modules/rxjs/src/internal/symbol/observable.ts", "node_modules/rxjs/src/internal/util/identity.ts", "node_modules/rxjs/src/internal/util/pipe.ts", "node_modules/rxjs/src/internal/Observable.ts", "node_modules/rxjs/src/internal/util/lift.ts", "node_modules/rxjs/src/internal/operators/OperatorSubscriber.ts", "node_modules/rxjs/src/internal/scheduler/animationFrameProvider.ts", "node_modules/rxjs/src/internal/util/ObjectUnsubscribedError.ts", 
"node_modules/rxjs/src/internal/Subject.ts", "node_modules/rxjs/src/internal/BehaviorSubject.ts", "node_modules/rxjs/src/internal/scheduler/dateTimestampProvider.ts", "node_modules/rxjs/src/internal/ReplaySubject.ts", "node_modules/rxjs/src/internal/scheduler/Action.ts", "node_modules/rxjs/src/internal/scheduler/intervalProvider.ts", "node_modules/rxjs/src/internal/scheduler/AsyncAction.ts", "node_modules/rxjs/src/internal/Scheduler.ts", "node_modules/rxjs/src/internal/scheduler/AsyncScheduler.ts", "node_modules/rxjs/src/internal/scheduler/async.ts", "node_modules/rxjs/src/internal/scheduler/QueueAction.ts", "node_modules/rxjs/src/internal/scheduler/QueueScheduler.ts", "node_modules/rxjs/src/internal/scheduler/queue.ts", "node_modules/rxjs/src/internal/scheduler/AnimationFrameAction.ts", "node_modules/rxjs/src/internal/scheduler/AnimationFrameScheduler.ts", "node_modules/rxjs/src/internal/scheduler/animationFrame.ts", "node_modules/rxjs/src/internal/observable/empty.ts", "node_modules/rxjs/src/internal/util/isScheduler.ts", "node_modules/rxjs/src/internal/util/args.ts", "node_modules/rxjs/src/internal/util/isArrayLike.ts", "node_modules/rxjs/src/internal/util/isPromise.ts", "node_modules/rxjs/src/internal/util/isInteropObservable.ts", "node_modules/rxjs/src/internal/util/isAsyncIterable.ts", "node_modules/rxjs/src/internal/util/throwUnobservableError.ts", "node_modules/rxjs/src/internal/symbol/iterator.ts", "node_modules/rxjs/src/internal/util/isIterable.ts", "node_modules/rxjs/src/internal/util/isReadableStreamLike.ts", "node_modules/rxjs/src/internal/observable/innerFrom.ts", "node_modules/rxjs/src/internal/util/executeSchedule.ts", "node_modules/rxjs/src/internal/operators/observeOn.ts", "node_modules/rxjs/src/internal/operators/subscribeOn.ts", "node_modules/rxjs/src/internal/scheduled/scheduleObservable.ts", "node_modules/rxjs/src/internal/scheduled/schedulePromise.ts", "node_modules/rxjs/src/internal/scheduled/scheduleArray.ts", 
"node_modules/rxjs/src/internal/scheduled/scheduleIterable.ts", "node_modules/rxjs/src/internal/scheduled/scheduleAsyncIterable.ts", "node_modules/rxjs/src/internal/scheduled/scheduleReadableStreamLike.ts", "node_modules/rxjs/src/internal/scheduled/scheduled.ts", "node_modules/rxjs/src/internal/observable/from.ts", "node_modules/rxjs/src/internal/observable/of.ts", "node_modules/rxjs/src/internal/observable/throwError.ts", "node_modules/rxjs/src/internal/util/EmptyError.ts", "node_modules/rxjs/src/internal/util/isDate.ts", "node_modules/rxjs/src/internal/operators/map.ts", "node_modules/rxjs/src/internal/util/mapOneOrManyArgs.ts", "node_modules/rxjs/src/internal/util/argsArgArrayOrObject.ts", "node_modules/rxjs/src/internal/util/createObject.ts", "node_modules/rxjs/src/internal/observable/combineLatest.ts", "node_modules/rxjs/src/internal/operators/mergeInternals.ts", "node_modules/rxjs/src/internal/operators/mergeMap.ts", "node_modules/rxjs/src/internal/operators/mergeAll.ts", "node_modules/rxjs/src/internal/operators/concatAll.ts", "node_modules/rxjs/src/internal/observable/concat.ts", "node_modules/rxjs/src/internal/observable/defer.ts", "node_modules/rxjs/src/internal/observable/fromEvent.ts", "node_modules/rxjs/src/internal/observable/fromEventPattern.ts", "node_modules/rxjs/src/internal/observable/timer.ts", "node_modules/rxjs/src/internal/observable/merge.ts", "node_modules/rxjs/src/internal/observable/never.ts", "node_modules/rxjs/src/internal/util/argsOrArgArray.ts", "node_modules/rxjs/src/internal/operators/filter.ts", "node_modules/rxjs/src/internal/observable/zip.ts", "node_modules/rxjs/src/internal/operators/audit.ts", "node_modules/rxjs/src/internal/operators/auditTime.ts", "node_modules/rxjs/src/internal/operators/bufferCount.ts", "node_modules/rxjs/src/internal/operators/catchError.ts", "node_modules/rxjs/src/internal/operators/scanInternals.ts", "node_modules/rxjs/src/internal/operators/combineLatest.ts", 
"node_modules/rxjs/src/internal/operators/combineLatestWith.ts", "node_modules/rxjs/src/internal/operators/debounce.ts", "node_modules/rxjs/src/internal/operators/debounceTime.ts", "node_modules/rxjs/src/internal/operators/defaultIfEmpty.ts", "node_modules/rxjs/src/internal/operators/take.ts", "node_modules/rxjs/src/internal/operators/ignoreElements.ts", "node_modules/rxjs/src/internal/operators/mapTo.ts", "node_modules/rxjs/src/internal/operators/delayWhen.ts", "node_modules/rxjs/src/internal/operators/delay.ts", "node_modules/rxjs/src/internal/operators/distinctUntilChanged.ts", "node_modules/rxjs/src/internal/operators/distinctUntilKeyChanged.ts", "node_modules/rxjs/src/internal/operators/throwIfEmpty.ts", "node_modules/rxjs/src/internal/operators/endWith.ts", "node_modules/rxjs/src/internal/operators/finalize.ts", "node_modules/rxjs/src/internal/operators/first.ts", "node_modules/rxjs/src/internal/operators/takeLast.ts", "node_modules/rxjs/src/internal/operators/merge.ts", "node_modules/rxjs/src/internal/operators/mergeWith.ts", "node_modules/rxjs/src/internal/operators/repeat.ts", "node_modules/rxjs/src/internal/operators/scan.ts", "node_modules/rxjs/src/internal/operators/share.ts", "node_modules/rxjs/src/internal/operators/shareReplay.ts", "node_modules/rxjs/src/internal/operators/skip.ts", "node_modules/rxjs/src/internal/operators/skipUntil.ts", "node_modules/rxjs/src/internal/operators/startWith.ts", "node_modules/rxjs/src/internal/operators/switchMap.ts", "node_modules/rxjs/src/internal/operators/takeUntil.ts", "node_modules/rxjs/src/internal/operators/takeWhile.ts", "node_modules/rxjs/src/internal/operators/tap.ts", "node_modules/rxjs/src/internal/operators/throttle.ts", "node_modules/rxjs/src/internal/operators/throttleTime.ts", "node_modules/rxjs/src/internal/operators/withLatestFrom.ts", "node_modules/rxjs/src/internal/operators/zip.ts", "node_modules/rxjs/src/internal/operators/zipWith.ts", 
"src/templates/assets/javascripts/browser/document/index.ts", "src/templates/assets/javascripts/browser/element/_/index.ts", "src/templates/assets/javascripts/browser/element/focus/index.ts", "src/templates/assets/javascripts/browser/element/hover/index.ts", "src/templates/assets/javascripts/utilities/h/index.ts", "src/templates/assets/javascripts/utilities/round/index.ts", "src/templates/assets/javascripts/browser/script/index.ts", "src/templates/assets/javascripts/browser/element/size/_/index.ts", "src/templates/assets/javascripts/browser/element/size/content/index.ts", "src/templates/assets/javascripts/browser/element/offset/_/index.ts", "src/templates/assets/javascripts/browser/element/offset/content/index.ts", "src/templates/assets/javascripts/browser/element/visibility/index.ts", "src/templates/assets/javascripts/browser/toggle/index.ts", "src/templates/assets/javascripts/browser/keyboard/index.ts", "src/templates/assets/javascripts/browser/location/_/index.ts", "src/templates/assets/javascripts/browser/location/hash/index.ts", "src/templates/assets/javascripts/browser/media/index.ts", "src/templates/assets/javascripts/browser/request/index.ts", "src/templates/assets/javascripts/browser/viewport/offset/index.ts", "src/templates/assets/javascripts/browser/viewport/size/index.ts", "src/templates/assets/javascripts/browser/viewport/_/index.ts", "src/templates/assets/javascripts/browser/viewport/at/index.ts", "src/templates/assets/javascripts/browser/worker/index.ts", "src/templates/assets/javascripts/_/index.ts", "src/templates/assets/javascripts/components/_/index.ts", "src/templates/assets/javascripts/components/announce/index.ts", "src/templates/assets/javascripts/components/consent/index.ts", "src/templates/assets/javascripts/templates/tooltip/index.tsx", "src/templates/assets/javascripts/templates/annotation/index.tsx", "src/templates/assets/javascripts/templates/clipboard/index.tsx", "src/templates/assets/javascripts/templates/search/index.tsx", 
"src/templates/assets/javascripts/templates/source/index.tsx", "src/templates/assets/javascripts/templates/tabbed/index.tsx", "src/templates/assets/javascripts/templates/table/index.tsx", "src/templates/assets/javascripts/templates/version/index.tsx", "src/templates/assets/javascripts/components/tooltip2/index.ts", "src/templates/assets/javascripts/components/content/annotation/_/index.ts", "src/templates/assets/javascripts/components/content/annotation/list/index.ts", "src/templates/assets/javascripts/components/content/annotation/block/index.ts", "src/templates/assets/javascripts/components/content/code/_/index.ts", "src/templates/assets/javascripts/components/content/details/index.ts", "src/templates/assets/javascripts/components/content/mermaid/index.css", "src/templates/assets/javascripts/components/content/mermaid/index.ts", "src/templates/assets/javascripts/components/content/table/index.ts", "src/templates/assets/javascripts/components/content/tabs/index.ts", "src/templates/assets/javascripts/components/content/_/index.ts", "src/templates/assets/javascripts/components/dialog/index.ts", "src/templates/assets/javascripts/components/tooltip/index.ts", "src/templates/assets/javascripts/components/header/_/index.ts", "src/templates/assets/javascripts/components/header/title/index.ts", "src/templates/assets/javascripts/components/main/index.ts", "src/templates/assets/javascripts/components/palette/index.ts", "src/templates/assets/javascripts/components/progress/index.ts", "src/templates/assets/javascripts/integrations/clipboard/index.ts", "src/templates/assets/javascripts/integrations/sitemap/index.ts", "src/templates/assets/javascripts/integrations/instant/index.ts", "src/templates/assets/javascripts/integrations/search/highlighter/index.ts", "src/templates/assets/javascripts/integrations/search/worker/message/index.ts", "src/templates/assets/javascripts/integrations/search/worker/_/index.ts", 
"src/templates/assets/javascripts/integrations/version/findurl/index.ts", "src/templates/assets/javascripts/integrations/version/index.ts", "src/templates/assets/javascripts/components/search/query/index.ts", "src/templates/assets/javascripts/components/search/result/index.ts", "src/templates/assets/javascripts/components/search/share/index.ts", "src/templates/assets/javascripts/components/search/suggest/index.ts", "src/templates/assets/javascripts/components/search/_/index.ts", "src/templates/assets/javascripts/components/search/highlight/index.ts", "src/templates/assets/javascripts/components/sidebar/index.ts", "src/templates/assets/javascripts/components/source/facts/github/index.ts", "src/templates/assets/javascripts/components/source/facts/gitlab/index.ts", "src/templates/assets/javascripts/components/source/facts/_/index.ts", "src/templates/assets/javascripts/components/source/_/index.ts", "src/templates/assets/javascripts/components/tabs/index.ts", "src/templates/assets/javascripts/components/toc/index.ts", "src/templates/assets/javascripts/components/top/index.ts", "src/templates/assets/javascripts/patches/ellipsis/index.ts", "src/templates/assets/javascripts/patches/indeterminate/index.ts", "src/templates/assets/javascripts/patches/scrollfix/index.ts", "src/templates/assets/javascripts/patches/scrolllock/index.ts", "src/templates/assets/javascripts/polyfills/index.ts"], + "sourcesContent": ["(function (global, factory) {\n typeof exports === 'object' && typeof module !== 'undefined' ? factory() :\n typeof define === 'function' && define.amd ? 
define(factory) :\n (factory());\n}(this, (function () { 'use strict';\n\n /**\n * Applies the :focus-visible polyfill at the given scope.\n * A scope in this case is either the top-level Document or a Shadow Root.\n *\n * @param {(Document|ShadowRoot)} scope\n * @see https://github.com/WICG/focus-visible\n */\n function applyFocusVisiblePolyfill(scope) {\n var hadKeyboardEvent = true;\n var hadFocusVisibleRecently = false;\n var hadFocusVisibleRecentlyTimeout = null;\n\n var inputTypesAllowlist = {\n text: true,\n search: true,\n url: true,\n tel: true,\n email: true,\n password: true,\n number: true,\n date: true,\n month: true,\n week: true,\n time: true,\n datetime: true,\n 'datetime-local': true\n };\n\n /**\n * Helper function for legacy browsers and iframes which sometimes focus\n * elements like document, body, and non-interactive SVG.\n * @param {Element} el\n */\n function isValidFocusTarget(el) {\n if (\n el &&\n el !== document &&\n el.nodeName !== 'HTML' &&\n el.nodeName !== 'BODY' &&\n 'classList' in el &&\n 'contains' in el.classList\n ) {\n return true;\n }\n return false;\n }\n\n /**\n * Computes whether the given element should automatically trigger the\n * `focus-visible` class being added, i.e. 
whether it should always match\n * `:focus-visible` when focused.\n * @param {Element} el\n * @return {boolean}\n */\n function focusTriggersKeyboardModality(el) {\n var type = el.type;\n var tagName = el.tagName;\n\n if (tagName === 'INPUT' && inputTypesAllowlist[type] && !el.readOnly) {\n return true;\n }\n\n if (tagName === 'TEXTAREA' && !el.readOnly) {\n return true;\n }\n\n if (el.isContentEditable) {\n return true;\n }\n\n return false;\n }\n\n /**\n * Add the `focus-visible` class to the given element if it was not added by\n * the author.\n * @param {Element} el\n */\n function addFocusVisibleClass(el) {\n if (el.classList.contains('focus-visible')) {\n return;\n }\n el.classList.add('focus-visible');\n el.setAttribute('data-focus-visible-added', '');\n }\n\n /**\n * Remove the `focus-visible` class from the given element if it was not\n * originally added by the author.\n * @param {Element} el\n */\n function removeFocusVisibleClass(el) {\n if (!el.hasAttribute('data-focus-visible-added')) {\n return;\n }\n el.classList.remove('focus-visible');\n el.removeAttribute('data-focus-visible-added');\n }\n\n /**\n * If the most recent user interaction was via the keyboard;\n * and the key press did not include a meta, alt/option, or control key;\n * then the modality is keyboard. 
Otherwise, the modality is not keyboard.\n * Apply `focus-visible` to any current active element and keep track\n * of our keyboard modality state with `hadKeyboardEvent`.\n * @param {KeyboardEvent} e\n */\n function onKeyDown(e) {\n if (e.metaKey || e.altKey || e.ctrlKey) {\n return;\n }\n\n if (isValidFocusTarget(scope.activeElement)) {\n addFocusVisibleClass(scope.activeElement);\n }\n\n hadKeyboardEvent = true;\n }\n\n /**\n * If at any point a user clicks with a pointing device, ensure that we change\n * the modality away from keyboard.\n * This avoids the situation where a user presses a key on an already focused\n * element, and then clicks on a different element, focusing it with a\n * pointing device, while we still think we're in keyboard modality.\n * @param {Event} e\n */\n function onPointerDown(e) {\n hadKeyboardEvent = false;\n }\n\n /**\n * On `focus`, add the `focus-visible` class to the target if:\n * - the target received focus as a result of keyboard navigation, or\n * - the event target is an element that will likely require interaction\n * via the keyboard (e.g. 
a text box)\n * @param {Event} e\n */\n function onFocus(e) {\n // Prevent IE from focusing the document or HTML element.\n if (!isValidFocusTarget(e.target)) {\n return;\n }\n\n if (hadKeyboardEvent || focusTriggersKeyboardModality(e.target)) {\n addFocusVisibleClass(e.target);\n }\n }\n\n /**\n * On `blur`, remove the `focus-visible` class from the target.\n * @param {Event} e\n */\n function onBlur(e) {\n if (!isValidFocusTarget(e.target)) {\n return;\n }\n\n if (\n e.target.classList.contains('focus-visible') ||\n e.target.hasAttribute('data-focus-visible-added')\n ) {\n // To detect a tab/window switch, we look for a blur event followed\n // rapidly by a visibility change.\n // If we don't see a visibility change within 100ms, it's probably a\n // regular focus change.\n hadFocusVisibleRecently = true;\n window.clearTimeout(hadFocusVisibleRecentlyTimeout);\n hadFocusVisibleRecentlyTimeout = window.setTimeout(function() {\n hadFocusVisibleRecently = false;\n }, 100);\n removeFocusVisibleClass(e.target);\n }\n }\n\n /**\n * If the user changes tabs, keep track of whether or not the previously\n * focused element had .focus-visible.\n * @param {Event} e\n */\n function onVisibilityChange(e) {\n if (document.visibilityState === 'hidden') {\n // If the tab becomes active again, the browser will handle calling focus\n // on the element (Safari actually calls it twice).\n // If this tab change caused a blur on an element with focus-visible,\n // re-apply the class when the user switches back to the tab.\n if (hadFocusVisibleRecently) {\n hadKeyboardEvent = true;\n }\n addInitialPointerMoveListeners();\n }\n }\n\n /**\n * Add a group of listeners to detect usage of any pointing devices.\n * These listeners will be added when the polyfill first loads, and anytime\n * the window is blurred, so that they are active when the window regains\n * focus.\n */\n function addInitialPointerMoveListeners() {\n document.addEventListener('mousemove', onInitialPointerMove);\n 
document.addEventListener('mousedown', onInitialPointerMove);\n document.addEventListener('mouseup', onInitialPointerMove);\n document.addEventListener('pointermove', onInitialPointerMove);\n document.addEventListener('pointerdown', onInitialPointerMove);\n document.addEventListener('pointerup', onInitialPointerMove);\n document.addEventListener('touchmove', onInitialPointerMove);\n document.addEventListener('touchstart', onInitialPointerMove);\n document.addEventListener('touchend', onInitialPointerMove);\n }\n\n function removeInitialPointerMoveListeners() {\n document.removeEventListener('mousemove', onInitialPointerMove);\n document.removeEventListener('mousedown', onInitialPointerMove);\n document.removeEventListener('mouseup', onInitialPointerMove);\n document.removeEventListener('pointermove', onInitialPointerMove);\n document.removeEventListener('pointerdown', onInitialPointerMove);\n document.removeEventListener('pointerup', onInitialPointerMove);\n document.removeEventListener('touchmove', onInitialPointerMove);\n document.removeEventListener('touchstart', onInitialPointerMove);\n document.removeEventListener('touchend', onInitialPointerMove);\n }\n\n /**\n * When the polfyill first loads, assume the user is in keyboard modality.\n * If any event is received from a pointing device (e.g. mouse, pointer,\n * touch), turn off keyboard modality.\n * This accounts for situations where focus enters the page from the URL bar.\n * @param {Event} e\n */\n function onInitialPointerMove(e) {\n // Work around a Safari quirk that fires a mousemove on whenever the\n // window blurs, even if you're tabbing out of the page. \u00AF\\_(\u30C4)_/\u00AF\n if (e.target.nodeName && e.target.nodeName.toLowerCase() === 'html') {\n return;\n }\n\n hadKeyboardEvent = false;\n removeInitialPointerMoveListeners();\n }\n\n // For some kinds of state, we are interested in changes at the global scope\n // only. 
For example, global pointer input, global key presses and global\n // visibility change should affect the state at every scope:\n document.addEventListener('keydown', onKeyDown, true);\n document.addEventListener('mousedown', onPointerDown, true);\n document.addEventListener('pointerdown', onPointerDown, true);\n document.addEventListener('touchstart', onPointerDown, true);\n document.addEventListener('visibilitychange', onVisibilityChange, true);\n\n addInitialPointerMoveListeners();\n\n // For focus and blur, we specifically care about state changes in the local\n // scope. This is because focus / blur events that originate from within a\n // shadow root are not re-dispatched from the host element if it was already\n // the active element in its own scope:\n scope.addEventListener('focus', onFocus, true);\n scope.addEventListener('blur', onBlur, true);\n\n // We detect that a node is a ShadowRoot by ensuring that it is a\n // DocumentFragment and also has a host property. This check covers native\n // implementation and polyfill implementation transparently. If we only cared\n // about the native implementation, we could just check if the scope was\n // an instance of a ShadowRoot.\n if (scope.nodeType === Node.DOCUMENT_FRAGMENT_NODE && scope.host) {\n // Since a ShadowRoot is a special kind of DocumentFragment, it does not\n // have a root element to add a class to. 
So, we add this attribute to the\n // host element instead:\n scope.host.setAttribute('data-js-focus-visible', '');\n } else if (scope.nodeType === Node.DOCUMENT_NODE) {\n document.documentElement.classList.add('js-focus-visible');\n document.documentElement.setAttribute('data-js-focus-visible', '');\n }\n }\n\n // It is important to wrap all references to global window and document in\n // these checks to support server-side rendering use cases\n // @see https://github.com/WICG/focus-visible/issues/199\n if (typeof window !== 'undefined' && typeof document !== 'undefined') {\n // Make the polyfill helper globally available. This can be used as a signal\n // to interested libraries that wish to coordinate with the polyfill for e.g.,\n // applying the polyfill to a shadow root:\n window.applyFocusVisiblePolyfill = applyFocusVisiblePolyfill;\n\n // Notify interested libraries of the polyfill's presence, in case the\n // polyfill was loaded lazily:\n var event;\n\n try {\n event = new CustomEvent('focus-visible-polyfill-ready');\n } catch (error) {\n // IE11 does not support using CustomEvent as a constructor directly:\n event = document.createEvent('CustomEvent');\n event.initCustomEvent('focus-visible-polyfill-ready', false, false, {});\n }\n\n window.dispatchEvent(event);\n }\n\n if (typeof document !== 'undefined') {\n // Apply the polyfill to the global document, so that no JavaScript\n // coordination is required to use the polyfill in the top-level document:\n applyFocusVisiblePolyfill(document);\n }\n\n})));\n", "/*!\n * escape-html\n * Copyright(c) 2012-2013 TJ Holowaychuk\n * Copyright(c) 2015 Andreas Lubbe\n * Copyright(c) 2015 Tiancheng \"Timothy\" Gu\n * MIT Licensed\n */\n\n'use strict';\n\n/**\n * Module variables.\n * @private\n */\n\nvar matchHtmlRegExp = /[\"'&<>]/;\n\n/**\n * Module exports.\n * @public\n */\n\nmodule.exports = escapeHtml;\n\n/**\n * Escape special characters in the given string of html.\n *\n * @param {string} string The string to 
escape for inserting into HTML\n * @return {string}\n * @public\n */\n\nfunction escapeHtml(string) {\n var str = '' + string;\n var match = matchHtmlRegExp.exec(str);\n\n if (!match) {\n return str;\n }\n\n var escape;\n var html = '';\n var index = 0;\n var lastIndex = 0;\n\n for (index = match.index; index < str.length; index++) {\n switch (str.charCodeAt(index)) {\n case 34: // \"\n escape = '"';\n break;\n case 38: // &\n escape = '&';\n break;\n case 39: // '\n escape = ''';\n break;\n case 60: // <\n escape = '<';\n break;\n case 62: // >\n escape = '>';\n break;\n default:\n continue;\n }\n\n if (lastIndex !== index) {\n html += str.substring(lastIndex, index);\n }\n\n lastIndex = index + 1;\n html += escape;\n }\n\n return lastIndex !== index\n ? html + str.substring(lastIndex, index)\n : html;\n}\n", "/*!\n * clipboard.js v2.0.11\n * https://clipboardjs.com/\n *\n * Licensed MIT \u00A9 Zeno Rocha\n */\n(function webpackUniversalModuleDefinition(root, factory) {\n\tif(typeof exports === 'object' && typeof module === 'object')\n\t\tmodule.exports = factory();\n\telse if(typeof define === 'function' && define.amd)\n\t\tdefine([], factory);\n\telse if(typeof exports === 'object')\n\t\texports[\"ClipboardJS\"] = factory();\n\telse\n\t\troot[\"ClipboardJS\"] = factory();\n})(this, function() {\nreturn /******/ (function() { // webpackBootstrap\n/******/ \tvar __webpack_modules__ = ({\n\n/***/ 686:\n/***/ (function(__unused_webpack_module, __webpack_exports__, __webpack_require__) {\n\n\"use strict\";\n\n// EXPORTS\n__webpack_require__.d(__webpack_exports__, {\n \"default\": function() { return /* binding */ clipboard; }\n});\n\n// EXTERNAL MODULE: ./node_modules/tiny-emitter/index.js\nvar tiny_emitter = __webpack_require__(279);\nvar tiny_emitter_default = /*#__PURE__*/__webpack_require__.n(tiny_emitter);\n// EXTERNAL MODULE: ./node_modules/good-listener/src/listen.js\nvar listen = __webpack_require__(370);\nvar listen_default = 
/*#__PURE__*/__webpack_require__.n(listen);\n// EXTERNAL MODULE: ./node_modules/select/src/select.js\nvar src_select = __webpack_require__(817);\nvar select_default = /*#__PURE__*/__webpack_require__.n(src_select);\n;// CONCATENATED MODULE: ./src/common/command.js\n/**\n * Executes a given operation type.\n * @param {String} type\n * @return {Boolean}\n */\nfunction command(type) {\n try {\n return document.execCommand(type);\n } catch (err) {\n return false;\n }\n}\n;// CONCATENATED MODULE: ./src/actions/cut.js\n\n\n/**\n * Cut action wrapper.\n * @param {String|HTMLElement} target\n * @return {String}\n */\n\nvar ClipboardActionCut = function ClipboardActionCut(target) {\n var selectedText = select_default()(target);\n command('cut');\n return selectedText;\n};\n\n/* harmony default export */ var actions_cut = (ClipboardActionCut);\n;// CONCATENATED MODULE: ./src/common/create-fake-element.js\n/**\n * Creates a fake textarea element with a value.\n * @param {String} value\n * @return {HTMLElement}\n */\nfunction createFakeElement(value) {\n var isRTL = document.documentElement.getAttribute('dir') === 'rtl';\n var fakeElement = document.createElement('textarea'); // Prevent zooming on iOS\n\n fakeElement.style.fontSize = '12pt'; // Reset box model\n\n fakeElement.style.border = '0';\n fakeElement.style.padding = '0';\n fakeElement.style.margin = '0'; // Move element out of screen horizontally\n\n fakeElement.style.position = 'absolute';\n fakeElement.style[isRTL ? 
'right' : 'left'] = '-9999px'; // Move element to the same position vertically\n\n var yPosition = window.pageYOffset || document.documentElement.scrollTop;\n fakeElement.style.top = \"\".concat(yPosition, \"px\");\n fakeElement.setAttribute('readonly', '');\n fakeElement.value = value;\n return fakeElement;\n}\n;// CONCATENATED MODULE: ./src/actions/copy.js\n\n\n\n/**\n * Create fake copy action wrapper using a fake element.\n * @param {String} target\n * @param {Object} options\n * @return {String}\n */\n\nvar fakeCopyAction = function fakeCopyAction(value, options) {\n var fakeElement = createFakeElement(value);\n options.container.appendChild(fakeElement);\n var selectedText = select_default()(fakeElement);\n command('copy');\n fakeElement.remove();\n return selectedText;\n};\n/**\n * Copy action wrapper.\n * @param {String|HTMLElement} target\n * @param {Object} options\n * @return {String}\n */\n\n\nvar ClipboardActionCopy = function ClipboardActionCopy(target) {\n var options = arguments.length > 1 && arguments[1] !== undefined ? arguments[1] : {\n container: document.body\n };\n var selectedText = '';\n\n if (typeof target === 'string') {\n selectedText = fakeCopyAction(target, options);\n } else if (target instanceof HTMLInputElement && !['text', 'search', 'url', 'tel', 'password'].includes(target === null || target === void 0 ? void 0 : target.type)) {\n // If input type doesn't support `setSelectionRange`. Simulate it. 
https://developer.mozilla.org/en-US/docs/Web/API/HTMLInputElement/setSelectionRange\n selectedText = fakeCopyAction(target.value, options);\n } else {\n selectedText = select_default()(target);\n command('copy');\n }\n\n return selectedText;\n};\n\n/* harmony default export */ var actions_copy = (ClipboardActionCopy);\n;// CONCATENATED MODULE: ./src/actions/default.js\nfunction _typeof(obj) { \"@babel/helpers - typeof\"; if (typeof Symbol === \"function\" && typeof Symbol.iterator === \"symbol\") { _typeof = function _typeof(obj) { return typeof obj; }; } else { _typeof = function _typeof(obj) { return obj && typeof Symbol === \"function\" && obj.constructor === Symbol && obj !== Symbol.prototype ? \"symbol\" : typeof obj; }; } return _typeof(obj); }\n\n\n\n/**\n * Inner function which performs selection from either `text` or `target`\n * properties and then executes copy or cut operations.\n * @param {Object} options\n */\n\nvar ClipboardActionDefault = function ClipboardActionDefault() {\n var options = arguments.length > 0 && arguments[0] !== undefined ? arguments[0] : {};\n // Defines base properties passed from constructor.\n var _options$action = options.action,\n action = _options$action === void 0 ? 'copy' : _options$action,\n container = options.container,\n target = options.target,\n text = options.text; // Sets the `action` to be performed which can be either 'copy' or 'cut'.\n\n if (action !== 'copy' && action !== 'cut') {\n throw new Error('Invalid \"action\" value, use either \"copy\" or \"cut\"');\n } // Sets the `target` property using an element that will be have its content copied.\n\n\n if (target !== undefined) {\n if (target && _typeof(target) === 'object' && target.nodeType === 1) {\n if (action === 'copy' && target.hasAttribute('disabled')) {\n throw new Error('Invalid \"target\" attribute. 
Please use \"readonly\" instead of \"disabled\" attribute');\n }\n\n if (action === 'cut' && (target.hasAttribute('readonly') || target.hasAttribute('disabled'))) {\n throw new Error('Invalid \"target\" attribute. You can\\'t cut text from elements with \"readonly\" or \"disabled\" attributes');\n }\n } else {\n throw new Error('Invalid \"target\" value, use a valid Element');\n }\n } // Define selection strategy based on `text` property.\n\n\n if (text) {\n return actions_copy(text, {\n container: container\n });\n } // Defines which selection strategy based on `target` property.\n\n\n if (target) {\n return action === 'cut' ? actions_cut(target) : actions_copy(target, {\n container: container\n });\n }\n};\n\n/* harmony default export */ var actions_default = (ClipboardActionDefault);\n;// CONCATENATED MODULE: ./src/clipboard.js\nfunction clipboard_typeof(obj) { \"@babel/helpers - typeof\"; if (typeof Symbol === \"function\" && typeof Symbol.iterator === \"symbol\") { clipboard_typeof = function _typeof(obj) { return typeof obj; }; } else { clipboard_typeof = function _typeof(obj) { return obj && typeof Symbol === \"function\" && obj.constructor === Symbol && obj !== Symbol.prototype ? 
\"symbol\" : typeof obj; }; } return clipboard_typeof(obj); }\n\nfunction _classCallCheck(instance, Constructor) { if (!(instance instanceof Constructor)) { throw new TypeError(\"Cannot call a class as a function\"); } }\n\nfunction _defineProperties(target, props) { for (var i = 0; i < props.length; i++) { var descriptor = props[i]; descriptor.enumerable = descriptor.enumerable || false; descriptor.configurable = true; if (\"value\" in descriptor) descriptor.writable = true; Object.defineProperty(target, descriptor.key, descriptor); } }\n\nfunction _createClass(Constructor, protoProps, staticProps) { if (protoProps) _defineProperties(Constructor.prototype, protoProps); if (staticProps) _defineProperties(Constructor, staticProps); return Constructor; }\n\nfunction _inherits(subClass, superClass) { if (typeof superClass !== \"function\" && superClass !== null) { throw new TypeError(\"Super expression must either be null or a function\"); } subClass.prototype = Object.create(superClass && superClass.prototype, { constructor: { value: subClass, writable: true, configurable: true } }); if (superClass) _setPrototypeOf(subClass, superClass); }\n\nfunction _setPrototypeOf(o, p) { _setPrototypeOf = Object.setPrototypeOf || function _setPrototypeOf(o, p) { o.__proto__ = p; return o; }; return _setPrototypeOf(o, p); }\n\nfunction _createSuper(Derived) { var hasNativeReflectConstruct = _isNativeReflectConstruct(); return function _createSuperInternal() { var Super = _getPrototypeOf(Derived), result; if (hasNativeReflectConstruct) { var NewTarget = _getPrototypeOf(this).constructor; result = Reflect.construct(Super, arguments, NewTarget); } else { result = Super.apply(this, arguments); } return _possibleConstructorReturn(this, result); }; }\n\nfunction _possibleConstructorReturn(self, call) { if (call && (clipboard_typeof(call) === \"object\" || typeof call === \"function\")) { return call; } return _assertThisInitialized(self); }\n\nfunction _assertThisInitialized(self) { if 
(self === void 0) { throw new ReferenceError(\"this hasn't been initialised - super() hasn't been called\"); } return self; }\n\nfunction _isNativeReflectConstruct() { if (typeof Reflect === \"undefined\" || !Reflect.construct) return false; if (Reflect.construct.sham) return false; if (typeof Proxy === \"function\") return true; try { Date.prototype.toString.call(Reflect.construct(Date, [], function () {})); return true; } catch (e) { return false; } }\n\nfunction _getPrototypeOf(o) { _getPrototypeOf = Object.setPrototypeOf ? Object.getPrototypeOf : function _getPrototypeOf(o) { return o.__proto__ || Object.getPrototypeOf(o); }; return _getPrototypeOf(o); }\n\n\n\n\n\n\n/**\n * Helper function to retrieve attribute value.\n * @param {String} suffix\n * @param {Element} element\n */\n\nfunction getAttributeValue(suffix, element) {\n var attribute = \"data-clipboard-\".concat(suffix);\n\n if (!element.hasAttribute(attribute)) {\n return;\n }\n\n return element.getAttribute(attribute);\n}\n/**\n * Base class which takes one or more elements, adds event listeners to them,\n * and instantiates a new `ClipboardAction` on each click.\n */\n\n\nvar Clipboard = /*#__PURE__*/function (_Emitter) {\n _inherits(Clipboard, _Emitter);\n\n var _super = _createSuper(Clipboard);\n\n /**\n * @param {String|HTMLElement|HTMLCollection|NodeList} trigger\n * @param {Object} options\n */\n function Clipboard(trigger, options) {\n var _this;\n\n _classCallCheck(this, Clipboard);\n\n _this = _super.call(this);\n\n _this.resolveOptions(options);\n\n _this.listenClick(trigger);\n\n return _this;\n }\n /**\n * Defines if attributes would be resolved using internal setter functions\n * or custom functions that were passed in the constructor.\n * @param {Object} options\n */\n\n\n _createClass(Clipboard, [{\n key: \"resolveOptions\",\n value: function resolveOptions() {\n var options = arguments.length > 0 && arguments[0] !== undefined ? 
arguments[0] : {};\n this.action = typeof options.action === 'function' ? options.action : this.defaultAction;\n this.target = typeof options.target === 'function' ? options.target : this.defaultTarget;\n this.text = typeof options.text === 'function' ? options.text : this.defaultText;\n this.container = clipboard_typeof(options.container) === 'object' ? options.container : document.body;\n }\n /**\n * Adds a click event listener to the passed trigger.\n * @param {String|HTMLElement|HTMLCollection|NodeList} trigger\n */\n\n }, {\n key: \"listenClick\",\n value: function listenClick(trigger) {\n var _this2 = this;\n\n this.listener = listen_default()(trigger, 'click', function (e) {\n return _this2.onClick(e);\n });\n }\n /**\n * Defines a new `ClipboardAction` on each click event.\n * @param {Event} e\n */\n\n }, {\n key: \"onClick\",\n value: function onClick(e) {\n var trigger = e.delegateTarget || e.currentTarget;\n var action = this.action(trigger) || 'copy';\n var text = actions_default({\n action: action,\n container: this.container,\n target: this.target(trigger),\n text: this.text(trigger)\n }); // Fires an event based on the copy operation result.\n\n this.emit(text ? 
'success' : 'error', {\n action: action,\n text: text,\n trigger: trigger,\n clearSelection: function clearSelection() {\n if (trigger) {\n trigger.focus();\n }\n\n window.getSelection().removeAllRanges();\n }\n });\n }\n /**\n * Default `action` lookup function.\n * @param {Element} trigger\n */\n\n }, {\n key: \"defaultAction\",\n value: function defaultAction(trigger) {\n return getAttributeValue('action', trigger);\n }\n /**\n * Default `target` lookup function.\n * @param {Element} trigger\n */\n\n }, {\n key: \"defaultTarget\",\n value: function defaultTarget(trigger) {\n var selector = getAttributeValue('target', trigger);\n\n if (selector) {\n return document.querySelector(selector);\n }\n }\n /**\n * Allow fire programmatically a copy action\n * @param {String|HTMLElement} target\n * @param {Object} options\n * @returns Text copied.\n */\n\n }, {\n key: \"defaultText\",\n\n /**\n * Default `text` lookup function.\n * @param {Element} trigger\n */\n value: function defaultText(trigger) {\n return getAttributeValue('text', trigger);\n }\n /**\n * Destroy lifecycle.\n */\n\n }, {\n key: \"destroy\",\n value: function destroy() {\n this.listener.destroy();\n }\n }], [{\n key: \"copy\",\n value: function copy(target) {\n var options = arguments.length > 1 && arguments[1] !== undefined ? arguments[1] : {\n container: document.body\n };\n return actions_copy(target, options);\n }\n /**\n * Allow fire programmatically a cut action\n * @param {String|HTMLElement} target\n * @returns Text cutted.\n */\n\n }, {\n key: \"cut\",\n value: function cut(target) {\n return actions_cut(target);\n }\n /**\n * Returns the support of the given action, or all actions if no action is\n * given.\n * @param {String} [action]\n */\n\n }, {\n key: \"isSupported\",\n value: function isSupported() {\n var action = arguments.length > 0 && arguments[0] !== undefined ? arguments[0] : ['copy', 'cut'];\n var actions = typeof action === 'string' ? 
[action] : action;\n var support = !!document.queryCommandSupported;\n actions.forEach(function (action) {\n support = support && !!document.queryCommandSupported(action);\n });\n return support;\n }\n }]);\n\n return Clipboard;\n}((tiny_emitter_default()));\n\n/* harmony default export */ var clipboard = (Clipboard);\n\n/***/ }),\n\n/***/ 828:\n/***/ (function(module) {\n\nvar DOCUMENT_NODE_TYPE = 9;\n\n/**\n * A polyfill for Element.matches()\n */\nif (typeof Element !== 'undefined' && !Element.prototype.matches) {\n var proto = Element.prototype;\n\n proto.matches = proto.matchesSelector ||\n proto.mozMatchesSelector ||\n proto.msMatchesSelector ||\n proto.oMatchesSelector ||\n proto.webkitMatchesSelector;\n}\n\n/**\n * Finds the closest parent that matches a selector.\n *\n * @param {Element} element\n * @param {String} selector\n * @return {Function}\n */\nfunction closest (element, selector) {\n while (element && element.nodeType !== DOCUMENT_NODE_TYPE) {\n if (typeof element.matches === 'function' &&\n element.matches(selector)) {\n return element;\n }\n element = element.parentNode;\n }\n}\n\nmodule.exports = closest;\n\n\n/***/ }),\n\n/***/ 438:\n/***/ (function(module, __unused_webpack_exports, __webpack_require__) {\n\nvar closest = __webpack_require__(828);\n\n/**\n * Delegates event to a selector.\n *\n * @param {Element} element\n * @param {String} selector\n * @param {String} type\n * @param {Function} callback\n * @param {Boolean} useCapture\n * @return {Object}\n */\nfunction _delegate(element, selector, type, callback, useCapture) {\n var listenerFn = listener.apply(this, arguments);\n\n element.addEventListener(type, listenerFn, useCapture);\n\n return {\n destroy: function() {\n element.removeEventListener(type, listenerFn, useCapture);\n }\n }\n}\n\n/**\n * Delegates event to a selector.\n *\n * @param {Element|String|Array} [elements]\n * @param {String} selector\n * @param {String} type\n * @param {Function} callback\n * @param {Boolean} 
useCapture\n * @return {Object}\n */\nfunction delegate(elements, selector, type, callback, useCapture) {\n // Handle the regular Element usage\n if (typeof elements.addEventListener === 'function') {\n return _delegate.apply(null, arguments);\n }\n\n // Handle Element-less usage, it defaults to global delegation\n if (typeof type === 'function') {\n // Use `document` as the first parameter, then apply arguments\n // This is a short way to .unshift `arguments` without running into deoptimizations\n return _delegate.bind(null, document).apply(null, arguments);\n }\n\n // Handle Selector-based usage\n if (typeof elements === 'string') {\n elements = document.querySelectorAll(elements);\n }\n\n // Handle Array-like based usage\n return Array.prototype.map.call(elements, function (element) {\n return _delegate(element, selector, type, callback, useCapture);\n });\n}\n\n/**\n * Finds closest match and invokes callback.\n *\n * @param {Element} element\n * @param {String} selector\n * @param {String} type\n * @param {Function} callback\n * @return {Function}\n */\nfunction listener(element, selector, type, callback) {\n return function(e) {\n e.delegateTarget = closest(e.target, selector);\n\n if (e.delegateTarget) {\n callback.call(element, e);\n }\n }\n}\n\nmodule.exports = delegate;\n\n\n/***/ }),\n\n/***/ 879:\n/***/ (function(__unused_webpack_module, exports) {\n\n/**\n * Check if argument is a HTML element.\n *\n * @param {Object} value\n * @return {Boolean}\n */\nexports.node = function(value) {\n return value !== undefined\n && value instanceof HTMLElement\n && value.nodeType === 1;\n};\n\n/**\n * Check if argument is a list of HTML elements.\n *\n * @param {Object} value\n * @return {Boolean}\n */\nexports.nodeList = function(value) {\n var type = Object.prototype.toString.call(value);\n\n return value !== undefined\n && (type === '[object NodeList]' || type === '[object HTMLCollection]')\n && ('length' in value)\n && (value.length === 0 || 
exports.node(value[0]));\n};\n\n/**\n * Check if argument is a string.\n *\n * @param {Object} value\n * @return {Boolean}\n */\nexports.string = function(value) {\n return typeof value === 'string'\n || value instanceof String;\n};\n\n/**\n * Check if argument is a function.\n *\n * @param {Object} value\n * @return {Boolean}\n */\nexports.fn = function(value) {\n var type = Object.prototype.toString.call(value);\n\n return type === '[object Function]';\n};\n\n\n/***/ }),\n\n/***/ 370:\n/***/ (function(module, __unused_webpack_exports, __webpack_require__) {\n\nvar is = __webpack_require__(879);\nvar delegate = __webpack_require__(438);\n\n/**\n * Validates all params and calls the right\n * listener function based on its target type.\n *\n * @param {String|HTMLElement|HTMLCollection|NodeList} target\n * @param {String} type\n * @param {Function} callback\n * @return {Object}\n */\nfunction listen(target, type, callback) {\n if (!target && !type && !callback) {\n throw new Error('Missing required arguments');\n }\n\n if (!is.string(type)) {\n throw new TypeError('Second argument must be a String');\n }\n\n if (!is.fn(callback)) {\n throw new TypeError('Third argument must be a Function');\n }\n\n if (is.node(target)) {\n return listenNode(target, type, callback);\n }\n else if (is.nodeList(target)) {\n return listenNodeList(target, type, callback);\n }\n else if (is.string(target)) {\n return listenSelector(target, type, callback);\n }\n else {\n throw new TypeError('First argument must be a String, HTMLElement, HTMLCollection, or NodeList');\n }\n}\n\n/**\n * Adds an event listener to a HTML element\n * and returns a remove listener function.\n *\n * @param {HTMLElement} node\n * @param {String} type\n * @param {Function} callback\n * @return {Object}\n */\nfunction listenNode(node, type, callback) {\n node.addEventListener(type, callback);\n\n return {\n destroy: function() {\n node.removeEventListener(type, callback);\n }\n }\n}\n\n/**\n * Add an event listener 
to a list of HTML elements\n * and returns a remove listener function.\n *\n * @param {NodeList|HTMLCollection} nodeList\n * @param {String} type\n * @param {Function} callback\n * @return {Object}\n */\nfunction listenNodeList(nodeList, type, callback) {\n Array.prototype.forEach.call(nodeList, function(node) {\n node.addEventListener(type, callback);\n });\n\n return {\n destroy: function() {\n Array.prototype.forEach.call(nodeList, function(node) {\n node.removeEventListener(type, callback);\n });\n }\n }\n}\n\n/**\n * Add an event listener to a selector\n * and returns a remove listener function.\n *\n * @param {String} selector\n * @param {String} type\n * @param {Function} callback\n * @return {Object}\n */\nfunction listenSelector(selector, type, callback) {\n return delegate(document.body, selector, type, callback);\n}\n\nmodule.exports = listen;\n\n\n/***/ }),\n\n/***/ 817:\n/***/ (function(module) {\n\nfunction select(element) {\n var selectedText;\n\n if (element.nodeName === 'SELECT') {\n element.focus();\n\n selectedText = element.value;\n }\n else if (element.nodeName === 'INPUT' || element.nodeName === 'TEXTAREA') {\n var isReadOnly = element.hasAttribute('readonly');\n\n if (!isReadOnly) {\n element.setAttribute('readonly', '');\n }\n\n element.select();\n element.setSelectionRange(0, element.value.length);\n\n if (!isReadOnly) {\n element.removeAttribute('readonly');\n }\n\n selectedText = element.value;\n }\n else {\n if (element.hasAttribute('contenteditable')) {\n element.focus();\n }\n\n var selection = window.getSelection();\n var range = document.createRange();\n\n range.selectNodeContents(element);\n selection.removeAllRanges();\n selection.addRange(range);\n\n selectedText = selection.toString();\n }\n\n return selectedText;\n}\n\nmodule.exports = select;\n\n\n/***/ }),\n\n/***/ 279:\n/***/ (function(module) {\n\nfunction E () {\n // Keep this empty so it's easier to inherit from\n // (via https://github.com/lipsmack from 
https://github.com/scottcorgan/tiny-emitter/issues/3)\n}\n\nE.prototype = {\n on: function (name, callback, ctx) {\n var e = this.e || (this.e = {});\n\n (e[name] || (e[name] = [])).push({\n fn: callback,\n ctx: ctx\n });\n\n return this;\n },\n\n once: function (name, callback, ctx) {\n var self = this;\n function listener () {\n self.off(name, listener);\n callback.apply(ctx, arguments);\n };\n\n listener._ = callback\n return this.on(name, listener, ctx);\n },\n\n emit: function (name) {\n var data = [].slice.call(arguments, 1);\n var evtArr = ((this.e || (this.e = {}))[name] || []).slice();\n var i = 0;\n var len = evtArr.length;\n\n for (i; i < len; i++) {\n evtArr[i].fn.apply(evtArr[i].ctx, data);\n }\n\n return this;\n },\n\n off: function (name, callback) {\n var e = this.e || (this.e = {});\n var evts = e[name];\n var liveEvents = [];\n\n if (evts && callback) {\n for (var i = 0, len = evts.length; i < len; i++) {\n if (evts[i].fn !== callback && evts[i].fn._ !== callback)\n liveEvents.push(evts[i]);\n }\n }\n\n // Remove event from queue to prevent memory leak\n // Suggested by https://github.com/lazd\n // Ref: https://github.com/scottcorgan/tiny-emitter/commit/c6ebfaa9bc973b33d110a84a307742b7cf94c953#commitcomment-5024910\n\n (liveEvents.length)\n ? 
e[name] = liveEvents\n : delete e[name];\n\n return this;\n }\n};\n\nmodule.exports = E;\nmodule.exports.TinyEmitter = E;\n\n\n/***/ })\n\n/******/ \t});\n/************************************************************************/\n/******/ \t// The module cache\n/******/ \tvar __webpack_module_cache__ = {};\n/******/ \t\n/******/ \t// The require function\n/******/ \tfunction __webpack_require__(moduleId) {\n/******/ \t\t// Check if module is in cache\n/******/ \t\tif(__webpack_module_cache__[moduleId]) {\n/******/ \t\t\treturn __webpack_module_cache__[moduleId].exports;\n/******/ \t\t}\n/******/ \t\t// Create a new module (and put it into the cache)\n/******/ \t\tvar module = __webpack_module_cache__[moduleId] = {\n/******/ \t\t\t// no module.id needed\n/******/ \t\t\t// no module.loaded needed\n/******/ \t\t\texports: {}\n/******/ \t\t};\n/******/ \t\n/******/ \t\t// Execute the module function\n/******/ \t\t__webpack_modules__[moduleId](module, module.exports, __webpack_require__);\n/******/ \t\n/******/ \t\t// Return the exports of the module\n/******/ \t\treturn module.exports;\n/******/ \t}\n/******/ \t\n/************************************************************************/\n/******/ \t/* webpack/runtime/compat get default export */\n/******/ \t!function() {\n/******/ \t\t// getDefaultExport function for compatibility with non-harmony modules\n/******/ \t\t__webpack_require__.n = function(module) {\n/******/ \t\t\tvar getter = module && module.__esModule ?\n/******/ \t\t\t\tfunction() { return module['default']; } :\n/******/ \t\t\t\tfunction() { return module; };\n/******/ \t\t\t__webpack_require__.d(getter, { a: getter });\n/******/ \t\t\treturn getter;\n/******/ \t\t};\n/******/ \t}();\n/******/ \t\n/******/ \t/* webpack/runtime/define property getters */\n/******/ \t!function() {\n/******/ \t\t// define getter functions for harmony exports\n/******/ \t\t__webpack_require__.d = function(exports, definition) {\n/******/ \t\t\tfor(var key in definition) 
{\n/******/ \t\t\t\tif(__webpack_require__.o(definition, key) && !__webpack_require__.o(exports, key)) {\n/******/ \t\t\t\t\tObject.defineProperty(exports, key, { enumerable: true, get: definition[key] });\n/******/ \t\t\t\t}\n/******/ \t\t\t}\n/******/ \t\t};\n/******/ \t}();\n/******/ \t\n/******/ \t/* webpack/runtime/hasOwnProperty shorthand */\n/******/ \t!function() {\n/******/ \t\t__webpack_require__.o = function(obj, prop) { return Object.prototype.hasOwnProperty.call(obj, prop); }\n/******/ \t}();\n/******/ \t\n/************************************************************************/\n/******/ \t// module exports must be returned from runtime so entry inlining is disabled\n/******/ \t// startup\n/******/ \t// Load entry module and return exports\n/******/ \treturn __webpack_require__(686);\n/******/ })()\n.default;\n});", "/*\n * Copyright (c) 2016-2024 Martin Donath \n *\n * Permission is hereby granted, free of charge, to any person obtaining a copy\n * of this software and associated documentation files (the \"Software\"), to\n * deal in the Software without restriction, including without limitation the\n * rights to use, copy, modify, merge, publish, distribute, sublicense, and/or\n * sell copies of the Software, and to permit persons to whom the Software is\n * furnished to do so, subject to the following conditions:\n *\n * The above copyright notice and this permission notice shall be included in\n * all copies or substantial portions of the Software.\n *\n * THE SOFTWARE IS PROVIDED \"AS IS\", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR\n * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,\n * FITNESS FOR A PARTICULAR PURPOSE AND NON-INFRINGEMENT. 
IN NO EVENT SHALL THE\n * AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER\n * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING\n * FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS\n * IN THE SOFTWARE.\n */\n\nimport \"focus-visible\"\n\nimport {\n EMPTY,\n NEVER,\n Observable,\n Subject,\n defer,\n delay,\n filter,\n map,\n merge,\n mergeWith,\n shareReplay,\n switchMap\n} from \"rxjs\"\n\nimport { configuration, feature } from \"./_\"\nimport {\n at,\n getActiveElement,\n getOptionalElement,\n requestJSON,\n setLocation,\n setToggle,\n watchDocument,\n watchKeyboard,\n watchLocation,\n watchLocationTarget,\n watchMedia,\n watchPrint,\n watchScript,\n watchViewport\n} from \"./browser\"\nimport {\n getComponentElement,\n getComponentElements,\n mountAnnounce,\n mountBackToTop,\n mountConsent,\n mountContent,\n mountDialog,\n mountHeader,\n mountHeaderTitle,\n mountPalette,\n mountProgress,\n mountSearch,\n mountSearchHiglight,\n mountSidebar,\n mountSource,\n mountTableOfContents,\n mountTabs,\n watchHeader,\n watchMain\n} from \"./components\"\nimport {\n SearchIndex,\n setupClipboardJS,\n setupInstantNavigation,\n setupVersionSelector\n} from \"./integrations\"\nimport {\n patchEllipsis,\n patchIndeterminate,\n patchScrollfix,\n patchScrolllock\n} from \"./patches\"\nimport \"./polyfills\"\n\n/* ----------------------------------------------------------------------------\n * Functions - @todo refactor\n * ------------------------------------------------------------------------- */\n\n/**\n * Fetch search index\n *\n * @returns Search index observable\n */\nfunction fetchSearchIndex(): Observable {\n if (location.protocol === \"file:\") {\n return watchScript(\n `${new URL(\"search/search_index.js\", config.base)}`\n )\n .pipe(\n // @ts-ignore - @todo fix typings\n map(() => __index),\n shareReplay(1)\n )\n } else {\n return requestJSON(\n new URL(\"search/search_index.json\", config.base)\n 
)\n }\n}\n\n/* ----------------------------------------------------------------------------\n * Application\n * ------------------------------------------------------------------------- */\n\n/* Yay, JavaScript is available */\ndocument.documentElement.classList.remove(\"no-js\")\ndocument.documentElement.classList.add(\"js\")\n\n/* Set up navigation observables and subjects */\nconst document$ = watchDocument()\nconst location$ = watchLocation()\nconst target$ = watchLocationTarget(location$)\nconst keyboard$ = watchKeyboard()\n\n/* Set up media observables */\nconst viewport$ = watchViewport()\nconst tablet$ = watchMedia(\"(min-width: 960px)\")\nconst screen$ = watchMedia(\"(min-width: 1220px)\")\nconst print$ = watchPrint()\n\n/* Retrieve search index, if search is enabled */\nconst config = configuration()\nconst index$ = document.forms.namedItem(\"search\")\n ? fetchSearchIndex()\n : NEVER\n\n/* Set up Clipboard.js integration */\nconst alert$ = new Subject()\nsetupClipboardJS({ alert$ })\n\n/* Set up progress indicator */\nconst progress$ = new Subject()\n\n/* Set up instant navigation, if enabled */\nif (feature(\"navigation.instant\"))\n setupInstantNavigation({ location$, viewport$, progress$ })\n .subscribe(document$)\n\n/* Set up version selector */\nif (config.version?.provider === \"mike\")\n setupVersionSelector({ document$ })\n\n/* Always close drawer and search on navigation */\nmerge(location$, target$)\n .pipe(\n delay(125)\n )\n .subscribe(() => {\n setToggle(\"drawer\", false)\n setToggle(\"search\", false)\n })\n\n/* Set up global keyboard handlers */\nkeyboard$\n .pipe(\n filter(({ mode }) => mode === \"global\")\n )\n .subscribe(key => {\n switch (key.type) {\n\n /* Go to previous page */\n case \"p\":\n case \",\":\n const prev = getOptionalElement(\"link[rel=prev]\")\n if (typeof prev !== \"undefined\")\n setLocation(prev)\n break\n\n /* Go to next page */\n case \"n\":\n case \".\":\n const next = getOptionalElement(\"link[rel=next]\")\n 
if (typeof next !== \"undefined\")\n setLocation(next)\n break\n\n /* Expand navigation, see https://bit.ly/3ZjG5io */\n case \"Enter\":\n const active = getActiveElement()\n if (active instanceof HTMLLabelElement)\n active.click()\n }\n })\n\n/* Set up patches */\npatchEllipsis({ viewport$, document$ })\npatchIndeterminate({ document$, tablet$ })\npatchScrollfix({ document$ })\npatchScrolllock({ viewport$, tablet$ })\n\n/* Set up header and main area observable */\nconst header$ = watchHeader(getComponentElement(\"header\"), { viewport$ })\nconst main$ = document$\n .pipe(\n map(() => getComponentElement(\"main\")),\n switchMap(el => watchMain(el, { viewport$, header$ })),\n shareReplay(1)\n )\n\n/* Set up control component observables */\nconst control$ = merge(\n\n /* Consent */\n ...getComponentElements(\"consent\")\n .map(el => mountConsent(el, { target$ })),\n\n /* Dialog */\n ...getComponentElements(\"dialog\")\n .map(el => mountDialog(el, { alert$ })),\n\n /* Header */\n ...getComponentElements(\"header\")\n .map(el => mountHeader(el, { viewport$, header$, main$ })),\n\n /* Color palette */\n ...getComponentElements(\"palette\")\n .map(el => mountPalette(el)),\n\n /* Progress bar */\n ...getComponentElements(\"progress\")\n .map(el => mountProgress(el, { progress$ })),\n\n /* Search */\n ...getComponentElements(\"search\")\n .map(el => mountSearch(el, { index$, keyboard$ })),\n\n /* Repository information */\n ...getComponentElements(\"source\")\n .map(el => mountSource(el))\n)\n\n/* Set up content component observables */\nconst content$ = defer(() => merge(\n\n /* Announcement bar */\n ...getComponentElements(\"announce\")\n .map(el => mountAnnounce(el)),\n\n /* Content */\n ...getComponentElements(\"content\")\n .map(el => mountContent(el, { viewport$, target$, print$ })),\n\n /* Search highlighting */\n ...getComponentElements(\"content\")\n .map(el => feature(\"search.highlight\")\n ? 
mountSearchHiglight(el, { index$, location$ })\n : EMPTY\n ),\n\n /* Header title */\n ...getComponentElements(\"header-title\")\n .map(el => mountHeaderTitle(el, { viewport$, header$ })),\n\n /* Sidebar */\n ...getComponentElements(\"sidebar\")\n .map(el => el.getAttribute(\"data-md-type\") === \"navigation\"\n ? at(screen$, () => mountSidebar(el, { viewport$, header$, main$ }))\n : at(tablet$, () => mountSidebar(el, { viewport$, header$, main$ }))\n ),\n\n /* Navigation tabs */\n ...getComponentElements(\"tabs\")\n .map(el => mountTabs(el, { viewport$, header$ })),\n\n /* Table of contents */\n ...getComponentElements(\"toc\")\n .map(el => mountTableOfContents(el, {\n viewport$, header$, main$, target$\n })),\n\n /* Back-to-top button */\n ...getComponentElements(\"top\")\n .map(el => mountBackToTop(el, { viewport$, header$, main$, target$ }))\n))\n\n/* Set up component observables */\nconst component$ = document$\n .pipe(\n switchMap(() => content$),\n mergeWith(control$),\n shareReplay(1)\n )\n\n/* Subscribe to all components */\ncomponent$.subscribe()\n\n/* ----------------------------------------------------------------------------\n * Exports\n * ------------------------------------------------------------------------- */\n\nwindow.document$ = document$ /* Document observable */\nwindow.location$ = location$ /* Location subject */\nwindow.target$ = target$ /* Location target observable */\nwindow.keyboard$ = keyboard$ /* Keyboard observable */\nwindow.viewport$ = viewport$ /* Viewport observable */\nwindow.tablet$ = tablet$ /* Media tablet observable */\nwindow.screen$ = screen$ /* Media screen observable */\nwindow.print$ = print$ /* Media print observable */\nwindow.alert$ = alert$ /* Alert subject */\nwindow.progress$ = progress$ /* Progress indicator subject */\nwindow.component$ = component$ /* Component observable */\n", "/******************************************************************************\nCopyright (c) Microsoft Corporation.\n\nPermission 
to use, copy, modify, and/or distribute this software for any\npurpose with or without fee is hereby granted.\n\nTHE SOFTWARE IS PROVIDED \"AS IS\" AND THE AUTHOR DISCLAIMS ALL WARRANTIES WITH\nREGARD TO THIS SOFTWARE INCLUDING ALL IMPLIED WARRANTIES OF MERCHANTABILITY\nAND FITNESS. IN NO EVENT SHALL THE AUTHOR BE LIABLE FOR ANY SPECIAL, DIRECT,\nINDIRECT, OR CONSEQUENTIAL DAMAGES OR ANY DAMAGES WHATSOEVER RESULTING FROM\nLOSS OF USE, DATA OR PROFITS, WHETHER IN AN ACTION OF CONTRACT, NEGLIGENCE OR\nOTHER TORTIOUS ACTION, ARISING OUT OF OR IN CONNECTION WITH THE USE OR\nPERFORMANCE OF THIS SOFTWARE.\n***************************************************************************** */\n/* global Reflect, Promise, SuppressedError, Symbol, Iterator */\n\nvar extendStatics = function(d, b) {\n extendStatics = Object.setPrototypeOf ||\n ({ __proto__: [] } instanceof Array && function (d, b) { d.__proto__ = b; }) ||\n function (d, b) { for (var p in b) if (Object.prototype.hasOwnProperty.call(b, p)) d[p] = b[p]; };\n return extendStatics(d, b);\n};\n\nexport function __extends(d, b) {\n if (typeof b !== \"function\" && b !== null)\n throw new TypeError(\"Class extends value \" + String(b) + \" is not a constructor or null\");\n extendStatics(d, b);\n function __() { this.constructor = d; }\n d.prototype = b === null ? 
Object.create(b) : (__.prototype = b.prototype, new __());\n}\n\nexport var __assign = function() {\n __assign = Object.assign || function __assign(t) {\n for (var s, i = 1, n = arguments.length; i < n; i++) {\n s = arguments[i];\n for (var p in s) if (Object.prototype.hasOwnProperty.call(s, p)) t[p] = s[p];\n }\n return t;\n }\n return __assign.apply(this, arguments);\n}\n\nexport function __rest(s, e) {\n var t = {};\n for (var p in s) if (Object.prototype.hasOwnProperty.call(s, p) && e.indexOf(p) < 0)\n t[p] = s[p];\n if (s != null && typeof Object.getOwnPropertySymbols === \"function\")\n for (var i = 0, p = Object.getOwnPropertySymbols(s); i < p.length; i++) {\n if (e.indexOf(p[i]) < 0 && Object.prototype.propertyIsEnumerable.call(s, p[i]))\n t[p[i]] = s[p[i]];\n }\n return t;\n}\n\nexport function __decorate(decorators, target, key, desc) {\n var c = arguments.length, r = c < 3 ? target : desc === null ? desc = Object.getOwnPropertyDescriptor(target, key) : desc, d;\n if (typeof Reflect === \"object\" && typeof Reflect.decorate === \"function\") r = Reflect.decorate(decorators, target, key, desc);\n else for (var i = decorators.length - 1; i >= 0; i--) if (d = decorators[i]) r = (c < 3 ? d(r) : c > 3 ? d(target, key, r) : d(target, key)) || r;\n return c > 3 && r && Object.defineProperty(target, key, r), r;\n}\n\nexport function __param(paramIndex, decorator) {\n return function (target, key) { decorator(target, key, paramIndex); }\n}\n\nexport function __esDecorate(ctor, descriptorIn, decorators, contextIn, initializers, extraInitializers) {\n function accept(f) { if (f !== void 0 && typeof f !== \"function\") throw new TypeError(\"Function expected\"); return f; }\n var kind = contextIn.kind, key = kind === \"getter\" ? \"get\" : kind === \"setter\" ? \"set\" : \"value\";\n var target = !descriptorIn && ctor ? contextIn[\"static\"] ? ctor : ctor.prototype : null;\n var descriptor = descriptorIn || (target ? 
Object.getOwnPropertyDescriptor(target, contextIn.name) : {});\n var _, done = false;\n for (var i = decorators.length - 1; i >= 0; i--) {\n var context = {};\n for (var p in contextIn) context[p] = p === \"access\" ? {} : contextIn[p];\n for (var p in contextIn.access) context.access[p] = contextIn.access[p];\n context.addInitializer = function (f) { if (done) throw new TypeError(\"Cannot add initializers after decoration has completed\"); extraInitializers.push(accept(f || null)); };\n var result = (0, decorators[i])(kind === \"accessor\" ? { get: descriptor.get, set: descriptor.set } : descriptor[key], context);\n if (kind === \"accessor\") {\n if (result === void 0) continue;\n if (result === null || typeof result !== \"object\") throw new TypeError(\"Object expected\");\n if (_ = accept(result.get)) descriptor.get = _;\n if (_ = accept(result.set)) descriptor.set = _;\n if (_ = accept(result.init)) initializers.unshift(_);\n }\n else if (_ = accept(result)) {\n if (kind === \"field\") initializers.unshift(_);\n else descriptor[key] = _;\n }\n }\n if (target) Object.defineProperty(target, contextIn.name, descriptor);\n done = true;\n};\n\nexport function __runInitializers(thisArg, initializers, value) {\n var useValue = arguments.length > 2;\n for (var i = 0; i < initializers.length; i++) {\n value = useValue ? initializers[i].call(thisArg, value) : initializers[i].call(thisArg);\n }\n return useValue ? value : void 0;\n};\n\nexport function __propKey(x) {\n return typeof x === \"symbol\" ? x : \"\".concat(x);\n};\n\nexport function __setFunctionName(f, name, prefix) {\n if (typeof name === \"symbol\") name = name.description ? \"[\".concat(name.description, \"]\") : \"\";\n return Object.defineProperty(f, \"name\", { configurable: true, value: prefix ? 
\"\".concat(prefix, \" \", name) : name });\n};\n\nexport function __metadata(metadataKey, metadataValue) {\n if (typeof Reflect === \"object\" && typeof Reflect.metadata === \"function\") return Reflect.metadata(metadataKey, metadataValue);\n}\n\nexport function __awaiter(thisArg, _arguments, P, generator) {\n function adopt(value) { return value instanceof P ? value : new P(function (resolve) { resolve(value); }); }\n return new (P || (P = Promise))(function (resolve, reject) {\n function fulfilled(value) { try { step(generator.next(value)); } catch (e) { reject(e); } }\n function rejected(value) { try { step(generator[\"throw\"](value)); } catch (e) { reject(e); } }\n function step(result) { result.done ? resolve(result.value) : adopt(result.value).then(fulfilled, rejected); }\n step((generator = generator.apply(thisArg, _arguments || [])).next());\n });\n}\n\nexport function __generator(thisArg, body) {\n var _ = { label: 0, sent: function() { if (t[0] & 1) throw t[1]; return t[1]; }, trys: [], ops: [] }, f, y, t, g = Object.create((typeof Iterator === \"function\" ? Iterator : Object).prototype);\n return g.next = verb(0), g[\"throw\"] = verb(1), g[\"return\"] = verb(2), typeof Symbol === \"function\" && (g[Symbol.iterator] = function() { return this; }), g;\n function verb(n) { return function (v) { return step([n, v]); }; }\n function step(op) {\n if (f) throw new TypeError(\"Generator is already executing.\");\n while (g && (g = 0, op[0] && (_ = 0)), _) try {\n if (f = 1, y && (t = op[0] & 2 ? y[\"return\"] : op[0] ? 
y[\"throw\"] || ((t = y[\"return\"]) && t.call(y), 0) : y.next) && !(t = t.call(y, op[1])).done) return t;\n if (y = 0, t) op = [op[0] & 2, t.value];\n switch (op[0]) {\n case 0: case 1: t = op; break;\n case 4: _.label++; return { value: op[1], done: false };\n case 5: _.label++; y = op[1]; op = [0]; continue;\n case 7: op = _.ops.pop(); _.trys.pop(); continue;\n default:\n if (!(t = _.trys, t = t.length > 0 && t[t.length - 1]) && (op[0] === 6 || op[0] === 2)) { _ = 0; continue; }\n if (op[0] === 3 && (!t || (op[1] > t[0] && op[1] < t[3]))) { _.label = op[1]; break; }\n if (op[0] === 6 && _.label < t[1]) { _.label = t[1]; t = op; break; }\n if (t && _.label < t[2]) { _.label = t[2]; _.ops.push(op); break; }\n if (t[2]) _.ops.pop();\n _.trys.pop(); continue;\n }\n op = body.call(thisArg, _);\n } catch (e) { op = [6, e]; y = 0; } finally { f = t = 0; }\n if (op[0] & 5) throw op[1]; return { value: op[0] ? op[1] : void 0, done: true };\n }\n}\n\nexport var __createBinding = Object.create ? (function(o, m, k, k2) {\n if (k2 === undefined) k2 = k;\n var desc = Object.getOwnPropertyDescriptor(m, k);\n if (!desc || (\"get\" in desc ? !m.__esModule : desc.writable || desc.configurable)) {\n desc = { enumerable: true, get: function() { return m[k]; } };\n }\n Object.defineProperty(o, k2, desc);\n}) : (function(o, m, k, k2) {\n if (k2 === undefined) k2 = k;\n o[k2] = m[k];\n});\n\nexport function __exportStar(m, o) {\n for (var p in m) if (p !== \"default\" && !Object.prototype.hasOwnProperty.call(o, p)) __createBinding(o, m, p);\n}\n\nexport function __values(o) {\n var s = typeof Symbol === \"function\" && Symbol.iterator, m = s && o[s], i = 0;\n if (m) return m.call(o);\n if (o && typeof o.length === \"number\") return {\n next: function () {\n if (o && i >= o.length) o = void 0;\n return { value: o && o[i++], done: !o };\n }\n };\n throw new TypeError(s ? 
\"Object is not iterable.\" : \"Symbol.iterator is not defined.\");\n}\n\nexport function __read(o, n) {\n var m = typeof Symbol === \"function\" && o[Symbol.iterator];\n if (!m) return o;\n var i = m.call(o), r, ar = [], e;\n try {\n while ((n === void 0 || n-- > 0) && !(r = i.next()).done) ar.push(r.value);\n }\n catch (error) { e = { error: error }; }\n finally {\n try {\n if (r && !r.done && (m = i[\"return\"])) m.call(i);\n }\n finally { if (e) throw e.error; }\n }\n return ar;\n}\n\n/** @deprecated */\nexport function __spread() {\n for (var ar = [], i = 0; i < arguments.length; i++)\n ar = ar.concat(__read(arguments[i]));\n return ar;\n}\n\n/** @deprecated */\nexport function __spreadArrays() {\n for (var s = 0, i = 0, il = arguments.length; i < il; i++) s += arguments[i].length;\n for (var r = Array(s), k = 0, i = 0; i < il; i++)\n for (var a = arguments[i], j = 0, jl = a.length; j < jl; j++, k++)\n r[k] = a[j];\n return r;\n}\n\nexport function __spreadArray(to, from, pack) {\n if (pack || arguments.length === 2) for (var i = 0, l = from.length, ar; i < l; i++) {\n if (ar || !(i in from)) {\n if (!ar) ar = Array.prototype.slice.call(from, 0, i);\n ar[i] = from[i];\n }\n }\n return to.concat(ar || Array.prototype.slice.call(from));\n}\n\nexport function __await(v) {\n return this instanceof __await ? (this.v = v, this) : new __await(v);\n}\n\nexport function __asyncGenerator(thisArg, _arguments, generator) {\n if (!Symbol.asyncIterator) throw new TypeError(\"Symbol.asyncIterator is not defined.\");\n var g = generator.apply(thisArg, _arguments || []), i, q = [];\n return i = Object.create((typeof AsyncIterator === \"function\" ? 
AsyncIterator : Object).prototype), verb(\"next\"), verb(\"throw\"), verb(\"return\", awaitReturn), i[Symbol.asyncIterator] = function () { return this; }, i;\n function awaitReturn(f) { return function (v) { return Promise.resolve(v).then(f, reject); }; }\n function verb(n, f) { if (g[n]) { i[n] = function (v) { return new Promise(function (a, b) { q.push([n, v, a, b]) > 1 || resume(n, v); }); }; if (f) i[n] = f(i[n]); } }\n function resume(n, v) { try { step(g[n](v)); } catch (e) { settle(q[0][3], e); } }\n function step(r) { r.value instanceof __await ? Promise.resolve(r.value.v).then(fulfill, reject) : settle(q[0][2], r); }\n function fulfill(value) { resume(\"next\", value); }\n function reject(value) { resume(\"throw\", value); }\n function settle(f, v) { if (f(v), q.shift(), q.length) resume(q[0][0], q[0][1]); }\n}\n\nexport function __asyncDelegator(o) {\n var i, p;\n return i = {}, verb(\"next\"), verb(\"throw\", function (e) { throw e; }), verb(\"return\"), i[Symbol.iterator] = function () { return this; }, i;\n function verb(n, f) { i[n] = o[n] ? function (v) { return (p = !p) ? { value: __await(o[n](v)), done: false } : f ? f(v) : v; } : f; }\n}\n\nexport function __asyncValues(o) {\n if (!Symbol.asyncIterator) throw new TypeError(\"Symbol.asyncIterator is not defined.\");\n var m = o[Symbol.asyncIterator], i;\n return m ? m.call(o) : (o = typeof __values === \"function\" ? 
__values(o) : o[Symbol.iterator](), i = {}, verb(\"next\"), verb(\"throw\"), verb(\"return\"), i[Symbol.asyncIterator] = function () { return this; }, i);\n function verb(n) { i[n] = o[n] && function (v) { return new Promise(function (resolve, reject) { v = o[n](v), settle(resolve, reject, v.done, v.value); }); }; }\n function settle(resolve, reject, d, v) { Promise.resolve(v).then(function(v) { resolve({ value: v, done: d }); }, reject); }\n}\n\nexport function __makeTemplateObject(cooked, raw) {\n if (Object.defineProperty) { Object.defineProperty(cooked, \"raw\", { value: raw }); } else { cooked.raw = raw; }\n return cooked;\n};\n\nvar __setModuleDefault = Object.create ? (function(o, v) {\n Object.defineProperty(o, \"default\", { enumerable: true, value: v });\n}) : function(o, v) {\n o[\"default\"] = v;\n};\n\nexport function __importStar(mod) {\n if (mod && mod.__esModule) return mod;\n var result = {};\n if (mod != null) for (var k in mod) if (k !== \"default\" && Object.prototype.hasOwnProperty.call(mod, k)) __createBinding(result, mod, k);\n __setModuleDefault(result, mod);\n return result;\n}\n\nexport function __importDefault(mod) {\n return (mod && mod.__esModule) ? mod : { default: mod };\n}\n\nexport function __classPrivateFieldGet(receiver, state, kind, f) {\n if (kind === \"a\" && !f) throw new TypeError(\"Private accessor was defined without a getter\");\n if (typeof state === \"function\" ? receiver !== state || !f : !state.has(receiver)) throw new TypeError(\"Cannot read private member from an object whose class did not declare it\");\n return kind === \"m\" ? f : kind === \"a\" ? f.call(receiver) : f ? f.value : state.get(receiver);\n}\n\nexport function __classPrivateFieldSet(receiver, state, value, kind, f) {\n if (kind === \"m\") throw new TypeError(\"Private method is not writable\");\n if (kind === \"a\" && !f) throw new TypeError(\"Private accessor was defined without a setter\");\n if (typeof state === \"function\" ? 
receiver !== state || !f : !state.has(receiver)) throw new TypeError(\"Cannot write private member to an object whose class did not declare it\");\n return (kind === \"a\" ? f.call(receiver, value) : f ? f.value = value : state.set(receiver, value)), value;\n}\n\nexport function __classPrivateFieldIn(state, receiver) {\n if (receiver === null || (typeof receiver !== \"object\" && typeof receiver !== \"function\")) throw new TypeError(\"Cannot use 'in' operator on non-object\");\n return typeof state === \"function\" ? receiver === state : state.has(receiver);\n}\n\nexport function __addDisposableResource(env, value, async) {\n if (value !== null && value !== void 0) {\n if (typeof value !== \"object\" && typeof value !== \"function\") throw new TypeError(\"Object expected.\");\n var dispose, inner;\n if (async) {\n if (!Symbol.asyncDispose) throw new TypeError(\"Symbol.asyncDispose is not defined.\");\n dispose = value[Symbol.asyncDispose];\n }\n if (dispose === void 0) {\n if (!Symbol.dispose) throw new TypeError(\"Symbol.dispose is not defined.\");\n dispose = value[Symbol.dispose];\n if (async) inner = dispose;\n }\n if (typeof dispose !== \"function\") throw new TypeError(\"Object not disposable.\");\n if (inner) dispose = function() { try { inner.call(this); } catch (e) { return Promise.reject(e); } };\n env.stack.push({ value: value, dispose: dispose, async: async });\n }\n else if (async) {\n env.stack.push({ async: true });\n }\n return value;\n}\n\nvar _SuppressedError = typeof SuppressedError === \"function\" ? SuppressedError : function (error, suppressed, message) {\n var e = new Error(message);\n return e.name = \"SuppressedError\", e.error = error, e.suppressed = suppressed, e;\n};\n\nexport function __disposeResources(env) {\n function fail(e) {\n env.error = env.hasError ? 
new _SuppressedError(e, env.error, \"An error was suppressed during disposal.\") : e;\n env.hasError = true;\n }\n var r, s = 0;\n function next() {\n while (r = env.stack.pop()) {\n try {\n if (!r.async && s === 1) return s = 0, env.stack.push(r), Promise.resolve().then(next);\n if (r.dispose) {\n var result = r.dispose.call(r.value);\n if (r.async) return s |= 2, Promise.resolve(result).then(next, function(e) { fail(e); return next(); });\n }\n else s |= 1;\n }\n catch (e) {\n fail(e);\n }\n }\n if (s === 1) return env.hasError ? Promise.reject(env.error) : Promise.resolve();\n if (env.hasError) throw env.error;\n }\n return next();\n}\n\nexport default {\n __extends,\n __assign,\n __rest,\n __decorate,\n __param,\n __metadata,\n __awaiter,\n __generator,\n __createBinding,\n __exportStar,\n __values,\n __read,\n __spread,\n __spreadArrays,\n __spreadArray,\n __await,\n __asyncGenerator,\n __asyncDelegator,\n __asyncValues,\n __makeTemplateObject,\n __importStar,\n __importDefault,\n __classPrivateFieldGet,\n __classPrivateFieldSet,\n __classPrivateFieldIn,\n __addDisposableResource,\n __disposeResources,\n};\n", "/**\n * Returns true if the object is a function.\n * @param value The value to check\n */\nexport function isFunction(value: any): value is (...args: any[]) => any {\n return typeof value === 'function';\n}\n", "/**\n * Used to create Error subclasses until the community moves away from ES5.\n *\n * This is because compiling from TypeScript down to ES5 has issues with subclassing Errors\n * as well as other built-in types: https://github.com/Microsoft/TypeScript/issues/12123\n *\n * @param createImpl A factory function to create the actual constructor implementation. 
The returned\n * function should be a named function that calls `_super` internally.\n */\nexport function createErrorClass(createImpl: (_super: any) => any): T {\n const _super = (instance: any) => {\n Error.call(instance);\n instance.stack = new Error().stack;\n };\n\n const ctorFunc = createImpl(_super);\n ctorFunc.prototype = Object.create(Error.prototype);\n ctorFunc.prototype.constructor = ctorFunc;\n return ctorFunc;\n}\n", "import { createErrorClass } from './createErrorClass';\n\nexport interface UnsubscriptionError extends Error {\n readonly errors: any[];\n}\n\nexport interface UnsubscriptionErrorCtor {\n /**\n * @deprecated Internal implementation detail. Do not construct error instances.\n * Cannot be tagged as internal: https://github.com/ReactiveX/rxjs/issues/6269\n */\n new (errors: any[]): UnsubscriptionError;\n}\n\n/**\n * An error thrown when one or more errors have occurred during the\n * `unsubscribe` of a {@link Subscription}.\n */\nexport const UnsubscriptionError: UnsubscriptionErrorCtor = createErrorClass(\n (_super) =>\n function UnsubscriptionErrorImpl(this: any, errors: (Error | string)[]) {\n _super(this);\n this.message = errors\n ? 
`${errors.length} errors occurred during unsubscription:\n${errors.map((err, i) => `${i + 1}) ${err.toString()}`).join('\\n ')}`\n : '';\n this.name = 'UnsubscriptionError';\n this.errors = errors;\n }\n);\n", "/**\n * Removes an item from an array, mutating it.\n * @param arr The array to remove the item from\n * @param item The item to remove\n */\nexport function arrRemove(arr: T[] | undefined | null, item: T) {\n if (arr) {\n const index = arr.indexOf(item);\n 0 <= index && arr.splice(index, 1);\n }\n}\n", "import { isFunction } from './util/isFunction';\nimport { UnsubscriptionError } from './util/UnsubscriptionError';\nimport { SubscriptionLike, TeardownLogic, Unsubscribable } from './types';\nimport { arrRemove } from './util/arrRemove';\n\n/**\n * Represents a disposable resource, such as the execution of an Observable. A\n * Subscription has one important method, `unsubscribe`, that takes no argument\n * and just disposes the resource held by the subscription.\n *\n * Additionally, subscriptions may be grouped together through the `add()`\n * method, which will attach a child Subscription to the current Subscription.\n * When a Subscription is unsubscribed, all its children (and its grandchildren)\n * will be unsubscribed as well.\n *\n * @class Subscription\n */\nexport class Subscription implements SubscriptionLike {\n /** @nocollapse */\n public static EMPTY = (() => {\n const empty = new Subscription();\n empty.closed = true;\n return empty;\n })();\n\n /**\n * A flag to indicate whether this Subscription has already been unsubscribed.\n */\n public closed = false;\n\n private _parentage: Subscription[] | Subscription | null = null;\n\n /**\n * The list of registered finalizers to execute upon unsubscription. 
Adding and removing from this\n * list occurs in the {@link #add} and {@link #remove} methods.\n */\n private _finalizers: Exclude[] | null = null;\n\n /**\n * @param initialTeardown A function executed first as part of the finalization\n * process that is kicked off when {@link #unsubscribe} is called.\n */\n constructor(private initialTeardown?: () => void) {}\n\n /**\n * Disposes the resources held by the subscription. May, for instance, cancel\n * an ongoing Observable execution or cancel any other type of work that\n * started when the Subscription was created.\n * @return {void}\n */\n unsubscribe(): void {\n let errors: any[] | undefined;\n\n if (!this.closed) {\n this.closed = true;\n\n // Remove this from it's parents.\n const { _parentage } = this;\n if (_parentage) {\n this._parentage = null;\n if (Array.isArray(_parentage)) {\n for (const parent of _parentage) {\n parent.remove(this);\n }\n } else {\n _parentage.remove(this);\n }\n }\n\n const { initialTeardown: initialFinalizer } = this;\n if (isFunction(initialFinalizer)) {\n try {\n initialFinalizer();\n } catch (e) {\n errors = e instanceof UnsubscriptionError ? e.errors : [e];\n }\n }\n\n const { _finalizers } = this;\n if (_finalizers) {\n this._finalizers = null;\n for (const finalizer of _finalizers) {\n try {\n execFinalizer(finalizer);\n } catch (err) {\n errors = errors ?? [];\n if (err instanceof UnsubscriptionError) {\n errors = [...errors, ...err.errors];\n } else {\n errors.push(err);\n }\n }\n }\n }\n\n if (errors) {\n throw new UnsubscriptionError(errors);\n }\n }\n }\n\n /**\n * Adds a finalizer to this subscription, so that finalization will be unsubscribed/called\n * when this subscription is unsubscribed. 
If this subscription is already {@link #closed},\n * because it has already been unsubscribed, then whatever finalizer is passed to it\n * will automatically be executed (unless the finalizer itself is also a closed subscription).\n *\n * Closed Subscriptions cannot be added as finalizers to any subscription. Adding a closed\n * subscription to a any subscription will result in no operation. (A noop).\n *\n * Adding a subscription to itself, or adding `null` or `undefined` will not perform any\n * operation at all. (A noop).\n *\n * `Subscription` instances that are added to this instance will automatically remove themselves\n * if they are unsubscribed. Functions and {@link Unsubscribable} objects that you wish to remove\n * will need to be removed manually with {@link #remove}\n *\n * @param teardown The finalization logic to add to this subscription.\n */\n add(teardown: TeardownLogic): void {\n // Only add the finalizer if it's not undefined\n // and don't add a subscription to itself.\n if (teardown && teardown !== this) {\n if (this.closed) {\n // If this subscription is already closed,\n // execute whatever finalizer is handed to it automatically.\n execFinalizer(teardown);\n } else {\n if (teardown instanceof Subscription) {\n // We don't add closed subscriptions, and we don't add the same subscription\n // twice. Subscription unsubscribe is idempotent.\n if (teardown.closed || teardown._hasParent(this)) {\n return;\n }\n teardown._addParent(this);\n }\n (this._finalizers = this._finalizers ?? 
[]).push(teardown);\n }\n }\n }\n\n /**\n * Checks to see if a this subscription already has a particular parent.\n * This will signal that this subscription has already been added to the parent in question.\n * @param parent the parent to check for\n */\n private _hasParent(parent: Subscription) {\n const { _parentage } = this;\n return _parentage === parent || (Array.isArray(_parentage) && _parentage.includes(parent));\n }\n\n /**\n * Adds a parent to this subscription so it can be removed from the parent if it\n * unsubscribes on it's own.\n *\n * NOTE: THIS ASSUMES THAT {@link _hasParent} HAS ALREADY BEEN CHECKED.\n * @param parent The parent subscription to add\n */\n private _addParent(parent: Subscription) {\n const { _parentage } = this;\n this._parentage = Array.isArray(_parentage) ? (_parentage.push(parent), _parentage) : _parentage ? [_parentage, parent] : parent;\n }\n\n /**\n * Called on a child when it is removed via {@link #remove}.\n * @param parent The parent to remove\n */\n private _removeParent(parent: Subscription) {\n const { _parentage } = this;\n if (_parentage === parent) {\n this._parentage = null;\n } else if (Array.isArray(_parentage)) {\n arrRemove(_parentage, parent);\n }\n }\n\n /**\n * Removes a finalizer from this subscription that was previously added with the {@link #add} method.\n *\n * Note that `Subscription` instances, when unsubscribed, will automatically remove themselves\n * from every other `Subscription` they have been added to. 
This means that using the `remove` method\n * is not a common thing and should be used thoughtfully.\n *\n * If you add the same finalizer instance of a function or an unsubscribable object to a `Subscription` instance\n * more than once, you will need to call `remove` the same number of times to remove all instances.\n *\n * All finalizer instances are removed to free up memory upon unsubscription.\n *\n * @param teardown The finalizer to remove from this subscription\n */\n remove(teardown: Exclude): void {\n const { _finalizers } = this;\n _finalizers && arrRemove(_finalizers, teardown);\n\n if (teardown instanceof Subscription) {\n teardown._removeParent(this);\n }\n }\n}\n\nexport const EMPTY_SUBSCRIPTION = Subscription.EMPTY;\n\nexport function isSubscription(value: any): value is Subscription {\n return (\n value instanceof Subscription ||\n (value && 'closed' in value && isFunction(value.remove) && isFunction(value.add) && isFunction(value.unsubscribe))\n );\n}\n\nfunction execFinalizer(finalizer: Unsubscribable | (() => void)) {\n if (isFunction(finalizer)) {\n finalizer();\n } else {\n finalizer.unsubscribe();\n }\n}\n", "import { Subscriber } from './Subscriber';\nimport { ObservableNotification } from './types';\n\n/**\n * The {@link GlobalConfig} object for RxJS. It is used to configure things\n * like how to react on unhandled errors.\n */\nexport const config: GlobalConfig = {\n onUnhandledError: null,\n onStoppedNotification: null,\n Promise: undefined,\n useDeprecatedSynchronousErrorHandling: false,\n useDeprecatedNextContext: false,\n};\n\n/**\n * The global configuration object for RxJS, used to configure things\n * like how to react on unhandled errors. Accessible via {@link config}\n * object.\n */\nexport interface GlobalConfig {\n /**\n * A registration point for unhandled errors from RxJS. These are errors that\n * cannot were not handled by consuming code in the usual subscription path. 
For\n * example, if you have this configured, and you subscribe to an observable without\n * providing an error handler, errors from that subscription will end up here. This\n * will _always_ be called asynchronously on another job in the runtime. This is because\n * we do not want errors thrown in this user-configured handler to interfere with the\n * behavior of the library.\n */\n onUnhandledError: ((err: any) => void) | null;\n\n /**\n * A registration point for notifications that cannot be sent to subscribers because they\n * have completed, errored or have been explicitly unsubscribed. By default, next, complete\n * and error notifications sent to stopped subscribers are noops. However, sometimes callers\n * might want a different behavior. For example, with sources that attempt to report errors\n * to stopped subscribers, a caller can configure RxJS to throw an unhandled error instead.\n * This will _always_ be called asynchronously on another job in the runtime. This is because\n * we do not want errors thrown in this user-configured handler to interfere with the\n * behavior of the library.\n */\n onStoppedNotification: ((notification: ObservableNotification, subscriber: Subscriber) => void) | null;\n\n /**\n * The promise constructor used by default for {@link Observable#toPromise toPromise} and {@link Observable#forEach forEach}\n * methods.\n *\n * @deprecated As of version 8, RxJS will no longer support this sort of injection of a\n * Promise constructor. If you need a Promise implementation other than native promises,\n * please polyfill/patch Promise as you see appropriate. Will be removed in v8.\n */\n Promise?: PromiseConstructorLike;\n\n /**\n * If true, turns on synchronous error rethrowing, which is a deprecated behavior\n * in v6 and higher. This behavior enables bad patterns like wrapping a subscribe\n * call in a try/catch block. 
It also enables producer interference, a nasty bug\n * where a multicast can be broken for all observers by a downstream consumer with\n * an unhandled error. DO NOT USE THIS FLAG UNLESS IT'S NEEDED TO BUY TIME\n * FOR MIGRATION REASONS.\n *\n * @deprecated As of version 8, RxJS will no longer support synchronous throwing\n * of unhandled errors. All errors will be thrown on a separate call stack to prevent bad\n * behaviors described above. Will be removed in v8.\n */\n useDeprecatedSynchronousErrorHandling: boolean;\n\n /**\n * If true, enables an as-of-yet undocumented feature from v5: The ability to access\n * `unsubscribe()` via `this` context in `next` functions created in observers passed\n * to `subscribe`.\n *\n * This is being removed because the performance was severely problematic, and it could also cause\n * issues when types other than POJOs are passed to subscribe as subscribers, as they will likely have\n * their `this` context overwritten.\n *\n * @deprecated As of version 8, RxJS will no longer support altering the\n * context of next functions provided as part of an observer to Subscribe. Instead,\n * you will have access to a subscription or a signal or token that will allow you to do things like\n * unsubscribe and test closed status. 
Will be removed in v8.\n */\n useDeprecatedNextContext: boolean;\n}\n", "import type { TimerHandle } from './timerHandle';\ntype SetTimeoutFunction = (handler: () => void, timeout?: number, ...args: any[]) => TimerHandle;\ntype ClearTimeoutFunction = (handle: TimerHandle) => void;\n\ninterface TimeoutProvider {\n setTimeout: SetTimeoutFunction;\n clearTimeout: ClearTimeoutFunction;\n delegate:\n | {\n setTimeout: SetTimeoutFunction;\n clearTimeout: ClearTimeoutFunction;\n }\n | undefined;\n}\n\nexport const timeoutProvider: TimeoutProvider = {\n // When accessing the delegate, use the variable rather than `this` so that\n // the functions can be called without being bound to the provider.\n setTimeout(handler: () => void, timeout?: number, ...args) {\n const { delegate } = timeoutProvider;\n if (delegate?.setTimeout) {\n return delegate.setTimeout(handler, timeout, ...args);\n }\n return setTimeout(handler, timeout, ...args);\n },\n clearTimeout(handle) {\n const { delegate } = timeoutProvider;\n return (delegate?.clearTimeout || clearTimeout)(handle as any);\n },\n delegate: undefined,\n};\n", "import { config } from '../config';\nimport { timeoutProvider } from '../scheduler/timeoutProvider';\n\n/**\n * Handles an error on another job either with the user-configured {@link onUnhandledError},\n * or by throwing it on that new job so it can be picked up by `window.onerror`, `process.on('error')`, etc.\n *\n * This should be called whenever there is an error that is out-of-band with the subscription\n * or when an error hits a terminal boundary of the subscription and no error handler was provided.\n *\n * @param err the error to report\n */\nexport function reportUnhandledError(err: any) {\n timeoutProvider.setTimeout(() => {\n const { onUnhandledError } = config;\n if (onUnhandledError) {\n // Execute the user-configured error handler.\n onUnhandledError(err);\n } else {\n // Throw so it is picked up by the runtime's uncaught error mechanism.\n throw err;\n }\n 
});\n}\n", "/* tslint:disable:no-empty */\nexport function noop() { }\n", "import { CompleteNotification, NextNotification, ErrorNotification } from './types';\n\n/**\n * A completion object optimized for memory use and created to be the\n * same \"shape\" as other notifications in v8.\n * @internal\n */\nexport const COMPLETE_NOTIFICATION = (() => createNotification('C', undefined, undefined) as CompleteNotification)();\n\n/**\n * Internal use only. Creates an optimized error notification that is the same \"shape\"\n * as other notifications.\n * @internal\n */\nexport function errorNotification(error: any): ErrorNotification {\n return createNotification('E', undefined, error) as any;\n}\n\n/**\n * Internal use only. Creates an optimized next notification that is the same \"shape\"\n * as other notifications.\n * @internal\n */\nexport function nextNotification(value: T) {\n return createNotification('N', value, undefined) as NextNotification;\n}\n\n/**\n * Ensures that all notifications created internally have the same \"shape\" in v8.\n *\n * TODO: This is only exported to support a crazy legacy test in `groupBy`.\n * @internal\n */\nexport function createNotification(kind: 'N' | 'E' | 'C', value: any, error: any) {\n return {\n kind,\n value,\n error,\n };\n}\n", "import { config } from '../config';\n\nlet context: { errorThrown: boolean; error: any } | null = null;\n\n/**\n * Handles dealing with errors for super-gross mode. Creates a context, in which\n * any synchronously thrown errors will be passed to {@link captureError}. 
Which\n * will record the error such that it will be rethrown after the call back is complete.\n * TODO: Remove in v8\n * @param cb An immediately executed function.\n */\nexport function errorContext(cb: () => void) {\n if (config.useDeprecatedSynchronousErrorHandling) {\n const isRoot = !context;\n if (isRoot) {\n context = { errorThrown: false, error: null };\n }\n cb();\n if (isRoot) {\n const { errorThrown, error } = context!;\n context = null;\n if (errorThrown) {\n throw error;\n }\n }\n } else {\n // This is the general non-deprecated path for everyone that\n // isn't crazy enough to use super-gross mode (useDeprecatedSynchronousErrorHandling)\n cb();\n }\n}\n\n/**\n * Captures errors only in super-gross mode.\n * @param err the error to capture\n */\nexport function captureError(err: any) {\n if (config.useDeprecatedSynchronousErrorHandling && context) {\n context.errorThrown = true;\n context.error = err;\n }\n}\n", "import { isFunction } from './util/isFunction';\nimport { Observer, ObservableNotification } from './types';\nimport { isSubscription, Subscription } from './Subscription';\nimport { config } from './config';\nimport { reportUnhandledError } from './util/reportUnhandledError';\nimport { noop } from './util/noop';\nimport { nextNotification, errorNotification, COMPLETE_NOTIFICATION } from './NotificationFactories';\nimport { timeoutProvider } from './scheduler/timeoutProvider';\nimport { captureError } from './util/errorContext';\n\n/**\n * Implements the {@link Observer} interface and extends the\n * {@link Subscription} class. While the {@link Observer} is the public API for\n * consuming the values of an {@link Observable}, all Observers get converted to\n * a Subscriber, in order to provide Subscription-like capabilities such as\n * `unsubscribe`. 
Subscriber is a common type in RxJS, and crucial for\n * implementing operators, but it is rarely used as a public API.\n *\n * @class Subscriber\n */\nexport class Subscriber extends Subscription implements Observer {\n /**\n * A static factory for a Subscriber, given a (potentially partial) definition\n * of an Observer.\n * @param next The `next` callback of an Observer.\n * @param error The `error` callback of an\n * Observer.\n * @param complete The `complete` callback of an\n * Observer.\n * @return A Subscriber wrapping the (partially defined)\n * Observer represented by the given arguments.\n * @nocollapse\n * @deprecated Do not use. Will be removed in v8. There is no replacement for this\n * method, and there is no reason to be creating instances of `Subscriber` directly.\n * If you have a specific use case, please file an issue.\n */\n static create(next?: (x?: T) => void, error?: (e?: any) => void, complete?: () => void): Subscriber {\n return new SafeSubscriber(next, error, complete);\n }\n\n /** @deprecated Internal implementation detail, do not use directly. Will be made internal in v8. */\n protected isStopped: boolean = false;\n /** @deprecated Internal implementation detail, do not use directly. Will be made internal in v8. */\n protected destination: Subscriber | Observer; // this `any` is the escape hatch to erase extra type param (e.g. R)\n\n /**\n * @deprecated Internal implementation detail, do not use directly. Will be made internal in v8.\n * There is no reason to directly create an instance of Subscriber. 
This type is exported for typings reasons.\n */\n constructor(destination?: Subscriber | Observer) {\n super();\n if (destination) {\n this.destination = destination;\n // Automatically chain subscriptions together here.\n // if destination is a Subscription, then it is a Subscriber.\n if (isSubscription(destination)) {\n destination.add(this);\n }\n } else {\n this.destination = EMPTY_OBSERVER;\n }\n }\n\n /**\n * The {@link Observer} callback to receive notifications of type `next` from\n * the Observable, with a value. The Observable may call this method 0 or more\n * times.\n * @param {T} [value] The `next` value.\n * @return {void}\n */\n next(value?: T): void {\n if (this.isStopped) {\n handleStoppedNotification(nextNotification(value), this);\n } else {\n this._next(value!);\n }\n }\n\n /**\n * The {@link Observer} callback to receive notifications of type `error` from\n * the Observable, with an attached `Error`. Notifies the Observer that\n * the Observable has experienced an error condition.\n * @param {any} [err] The `error` exception.\n * @return {void}\n */\n error(err?: any): void {\n if (this.isStopped) {\n handleStoppedNotification(errorNotification(err), this);\n } else {\n this.isStopped = true;\n this._error(err);\n }\n }\n\n /**\n * The {@link Observer} callback to receive a valueless notification of type\n * `complete` from the Observable. 
Notifies the Observer that the Observable\n * has finished sending push-based notifications.\n * @return {void}\n */\n complete(): void {\n if (this.isStopped) {\n handleStoppedNotification(COMPLETE_NOTIFICATION, this);\n } else {\n this.isStopped = true;\n this._complete();\n }\n }\n\n unsubscribe(): void {\n if (!this.closed) {\n this.isStopped = true;\n super.unsubscribe();\n this.destination = null!;\n }\n }\n\n protected _next(value: T): void {\n this.destination.next(value);\n }\n\n protected _error(err: any): void {\n try {\n this.destination.error(err);\n } finally {\n this.unsubscribe();\n }\n }\n\n protected _complete(): void {\n try {\n this.destination.complete();\n } finally {\n this.unsubscribe();\n }\n }\n}\n\n/**\n * This bind is captured here because we want to be able to have\n * compatibility with monoid libraries that tend to use a method named\n * `bind`. In particular, a library called Monio requires this.\n */\nconst _bind = Function.prototype.bind;\n\nfunction bind any>(fn: Fn, thisArg: any): Fn {\n return _bind.call(fn, thisArg);\n}\n\n/**\n * Internal optimization only, DO NOT EXPOSE.\n * @internal\n */\nclass ConsumerObserver implements Observer {\n constructor(private partialObserver: Partial>) {}\n\n next(value: T): void {\n const { partialObserver } = this;\n if (partialObserver.next) {\n try {\n partialObserver.next(value);\n } catch (error) {\n handleUnhandledError(error);\n }\n }\n }\n\n error(err: any): void {\n const { partialObserver } = this;\n if (partialObserver.error) {\n try {\n partialObserver.error(err);\n } catch (error) {\n handleUnhandledError(error);\n }\n } else {\n handleUnhandledError(err);\n }\n }\n\n complete(): void {\n const { partialObserver } = this;\n if (partialObserver.complete) {\n try {\n partialObserver.complete();\n } catch (error) {\n handleUnhandledError(error);\n }\n }\n }\n}\n\nexport class SafeSubscriber extends Subscriber {\n constructor(\n observerOrNext?: Partial> | ((value: T) => void) | 
null,\n error?: ((e?: any) => void) | null,\n complete?: (() => void) | null\n ) {\n super();\n\n let partialObserver: Partial>;\n if (isFunction(observerOrNext) || !observerOrNext) {\n // The first argument is a function, not an observer. The next\n // two arguments *could* be observers, or they could be empty.\n partialObserver = {\n next: (observerOrNext ?? undefined) as (((value: T) => void) | undefined),\n error: error ?? undefined,\n complete: complete ?? undefined,\n };\n } else {\n // The first argument is a partial observer.\n let context: any;\n if (this && config.useDeprecatedNextContext) {\n // This is a deprecated path that made `this.unsubscribe()` available in\n // next handler functions passed to subscribe. This only exists behind a flag\n // now, as it is *very* slow.\n context = Object.create(observerOrNext);\n context.unsubscribe = () => this.unsubscribe();\n partialObserver = {\n next: observerOrNext.next && bind(observerOrNext.next, context),\n error: observerOrNext.error && bind(observerOrNext.error, context),\n complete: observerOrNext.complete && bind(observerOrNext.complete, context),\n };\n } else {\n // The \"normal\" path. 
Just use the partial observer directly.\n partialObserver = observerOrNext;\n }\n }\n\n // Wrap the partial observer to ensure it's a full observer, and\n // make sure proper error handling is accounted for.\n this.destination = new ConsumerObserver(partialObserver);\n }\n}\n\nfunction handleUnhandledError(error: any) {\n if (config.useDeprecatedSynchronousErrorHandling) {\n captureError(error);\n } else {\n // Ideal path, we report this as an unhandled error,\n // which is thrown on a new call stack.\n reportUnhandledError(error);\n }\n}\n\n/**\n * An error handler used when no error handler was supplied\n * to the SafeSubscriber -- meaning no error handler was supplied\n * do the `subscribe` call on our observable.\n * @param err The error to handle\n */\nfunction defaultErrorHandler(err: any) {\n throw err;\n}\n\n/**\n * A handler for notifications that cannot be sent to a stopped subscriber.\n * @param notification The notification being sent\n * @param subscriber The stopped subscriber\n */\nfunction handleStoppedNotification(notification: ObservableNotification, subscriber: Subscriber) {\n const { onStoppedNotification } = config;\n onStoppedNotification && timeoutProvider.setTimeout(() => onStoppedNotification(notification, subscriber));\n}\n\n/**\n * The observer used as a stub for subscriptions where the user did not\n * pass any arguments to `subscribe`. Comes with the default error handling\n * behavior.\n */\nexport const EMPTY_OBSERVER: Readonly> & { closed: true } = {\n closed: true,\n next: noop,\n error: defaultErrorHandler,\n complete: noop,\n};\n", "/**\n * Symbol.observable or a string \"@@observable\". 
Used for interop\n *\n * @deprecated We will no longer be exporting this symbol in upcoming versions of RxJS.\n * Instead polyfill and use Symbol.observable directly *or* use https://www.npmjs.com/package/symbol-observable\n */\nexport const observable: string | symbol = (() => (typeof Symbol === 'function' && Symbol.observable) || '@@observable')();\n", "/**\n * This function takes one parameter and just returns it. Simply put,\n * this is like `(x: T): T => x`.\n *\n * ## Examples\n *\n * This is useful in some cases when using things like `mergeMap`\n *\n * ```ts\n * import { interval, take, map, range, mergeMap, identity } from 'rxjs';\n *\n * const source$ = interval(1000).pipe(take(5));\n *\n * const result$ = source$.pipe(\n * map(i => range(i)),\n * mergeMap(identity) // same as mergeMap(x => x)\n * );\n *\n * result$.subscribe({\n * next: console.log\n * });\n * ```\n *\n * Or when you want to selectively apply an operator\n *\n * ```ts\n * import { interval, take, identity } from 'rxjs';\n *\n * const shouldLimit = () => Math.random() < 0.5;\n *\n * const source$ = interval(1000);\n *\n * const result$ = source$.pipe(shouldLimit() ? 
take(5) : identity);\n *\n * result$.subscribe({\n * next: console.log\n * });\n * ```\n *\n * @param x Any value that is returned by this function\n * @returns The value passed as the first parameter to this function\n */\nexport function identity(x: T): T {\n return x;\n}\n", "import { identity } from './identity';\nimport { UnaryFunction } from '../types';\n\nexport function pipe(): typeof identity;\nexport function pipe(fn1: UnaryFunction): UnaryFunction;\nexport function pipe(fn1: UnaryFunction, fn2: UnaryFunction): UnaryFunction;\nexport function pipe(fn1: UnaryFunction, fn2: UnaryFunction, fn3: UnaryFunction): UnaryFunction;\nexport function pipe(\n fn1: UnaryFunction,\n fn2: UnaryFunction,\n fn3: UnaryFunction,\n fn4: UnaryFunction\n): UnaryFunction;\nexport function pipe(\n fn1: UnaryFunction,\n fn2: UnaryFunction,\n fn3: UnaryFunction,\n fn4: UnaryFunction,\n fn5: UnaryFunction\n): UnaryFunction;\nexport function pipe(\n fn1: UnaryFunction,\n fn2: UnaryFunction,\n fn3: UnaryFunction,\n fn4: UnaryFunction,\n fn5: UnaryFunction,\n fn6: UnaryFunction\n): UnaryFunction;\nexport function pipe(\n fn1: UnaryFunction,\n fn2: UnaryFunction,\n fn3: UnaryFunction,\n fn4: UnaryFunction,\n fn5: UnaryFunction,\n fn6: UnaryFunction,\n fn7: UnaryFunction\n): UnaryFunction;\nexport function pipe(\n fn1: UnaryFunction,\n fn2: UnaryFunction,\n fn3: UnaryFunction,\n fn4: UnaryFunction,\n fn5: UnaryFunction,\n fn6: UnaryFunction,\n fn7: UnaryFunction,\n fn8: UnaryFunction\n): UnaryFunction;\nexport function pipe(\n fn1: UnaryFunction,\n fn2: UnaryFunction,\n fn3: UnaryFunction,\n fn4: UnaryFunction,\n fn5: UnaryFunction,\n fn6: UnaryFunction,\n fn7: UnaryFunction,\n fn8: UnaryFunction,\n fn9: UnaryFunction\n): UnaryFunction;\nexport function pipe(\n fn1: UnaryFunction,\n fn2: UnaryFunction,\n fn3: UnaryFunction,\n fn4: UnaryFunction,\n fn5: UnaryFunction,\n fn6: UnaryFunction,\n fn7: UnaryFunction,\n fn8: UnaryFunction,\n fn9: UnaryFunction,\n ...fns: UnaryFunction[]\n): 
UnaryFunction;\n\n/**\n * pipe() can be called on one or more functions, each of which can take one argument (\"UnaryFunction\")\n * and uses it to return a value.\n * It returns a function that takes one argument, passes it to the first UnaryFunction, and then\n * passes the result to the next one, passes that result to the next one, and so on. \n */\nexport function pipe(...fns: Array>): UnaryFunction {\n return pipeFromArray(fns);\n}\n\n/** @internal */\nexport function pipeFromArray(fns: Array>): UnaryFunction {\n if (fns.length === 0) {\n return identity as UnaryFunction;\n }\n\n if (fns.length === 1) {\n return fns[0];\n }\n\n return function piped(input: T): R {\n return fns.reduce((prev: any, fn: UnaryFunction) => fn(prev), input as any);\n };\n}\n", "import { Operator } from './Operator';\nimport { SafeSubscriber, Subscriber } from './Subscriber';\nimport { isSubscription, Subscription } from './Subscription';\nimport { TeardownLogic, OperatorFunction, Subscribable, Observer } from './types';\nimport { observable as Symbol_observable } from './symbol/observable';\nimport { pipeFromArray } from './util/pipe';\nimport { config } from './config';\nimport { isFunction } from './util/isFunction';\nimport { errorContext } from './util/errorContext';\n\n/**\n * A representation of any set of values over any amount of time. This is the most basic building block\n * of RxJS.\n *\n * @class Observable\n */\nexport class Observable implements Subscribable {\n /**\n * @deprecated Internal implementation detail, do not use directly. Will be made internal in v8.\n */\n source: Observable | undefined;\n\n /**\n * @deprecated Internal implementation detail, do not use directly. Will be made internal in v8.\n */\n operator: Operator | undefined;\n\n /**\n * @constructor\n * @param {Function} subscribe the function that is called when the Observable is\n * initially subscribed to. 
This function is given a Subscriber, to which new values\n * can be `next`ed, or an `error` method can be called to raise an error, or\n * `complete` can be called to notify of a successful completion.\n */\n constructor(subscribe?: (this: Observable, subscriber: Subscriber) => TeardownLogic) {\n if (subscribe) {\n this._subscribe = subscribe;\n }\n }\n\n // HACK: Since TypeScript inherits static properties too, we have to\n // fight against TypeScript here so Subject can have a different static create signature\n /**\n * Creates a new Observable by calling the Observable constructor\n * @owner Observable\n * @method create\n * @param {Function} subscribe? the subscriber function to be passed to the Observable constructor\n * @return {Observable} a new observable\n * @nocollapse\n * @deprecated Use `new Observable()` instead. Will be removed in v8.\n */\n static create: (...args: any[]) => any = (subscribe?: (subscriber: Subscriber) => TeardownLogic) => {\n return new Observable(subscribe);\n };\n\n /**\n * Creates a new Observable, with this Observable instance as the source, and the passed\n * operator defined as the new observable's operator.\n * @method lift\n * @param operator the operator defining the operation to take on the observable\n * @return a new observable with the Operator applied\n * @deprecated Internal implementation detail, do not use directly. Will be made internal in v8.\n * If you have implemented an operator using `lift`, it is recommended that you create an\n * operator by simply returning `new Observable()` directly. 
See \"Creating new operators from\n * scratch\" section here: https://rxjs.dev/guide/operators\n */\n lift(operator?: Operator): Observable {\n const observable = new Observable();\n observable.source = this;\n observable.operator = operator;\n return observable;\n }\n\n subscribe(observerOrNext?: Partial> | ((value: T) => void)): Subscription;\n /** @deprecated Instead of passing separate callback arguments, use an observer argument. Signatures taking separate callback arguments will be removed in v8. Details: https://rxjs.dev/deprecations/subscribe-arguments */\n subscribe(next?: ((value: T) => void) | null, error?: ((error: any) => void) | null, complete?: (() => void) | null): Subscription;\n /**\n * Invokes an execution of an Observable and registers Observer handlers for notifications it will emit.\n *\n * Use it when you have all these Observables, but still nothing is happening.\n *\n * `subscribe` is not a regular operator, but a method that calls Observable's internal `subscribe` function. It\n * might be for example a function that you passed to Observable's constructor, but most of the time it is\n * a library implementation, which defines what will be emitted by an Observable, and when it be will emitted. This means\n * that calling `subscribe` is actually the moment when Observable starts its work, not when it is created, as it is often\n * the thought.\n *\n * Apart from starting the execution of an Observable, this method allows you to listen for values\n * that an Observable emits, as well as for when it completes or errors. You can achieve this in two\n * of the following ways.\n *\n * The first way is creating an object that implements {@link Observer} interface. It should have methods\n * defined by that interface, but note that it should be just a regular JavaScript object, which you can create\n * yourself in any way you want (ES6 class, classic function constructor, object literal etc.). 
In particular, do\n * not attempt to use any RxJS implementation details to create Observers - you don't need them. Remember also\n * that your object does not have to implement all methods. If you find yourself creating a method that doesn't\n * do anything, you can simply omit it. Note however, if the `error` method is not provided and an error happens,\n * it will be thrown asynchronously. Errors thrown asynchronously cannot be caught using `try`/`catch`. Instead,\n * use the {@link onUnhandledError} configuration option or use a runtime handler (like `window.onerror` or\n * `process.on('error)`) to be notified of unhandled errors. Because of this, it's recommended that you provide\n * an `error` method to avoid missing thrown errors.\n *\n * The second way is to give up on Observer object altogether and simply provide callback functions in place of its methods.\n * This means you can provide three functions as arguments to `subscribe`, where the first function is equivalent\n * of a `next` method, the second of an `error` method and the third of a `complete` method. Just as in case of an Observer,\n * if you do not need to listen for something, you can omit a function by passing `undefined` or `null`,\n * since `subscribe` recognizes these functions by where they were placed in function call. When it comes\n * to the `error` function, as with an Observer, if not provided, errors emitted by an Observable will be thrown asynchronously.\n *\n * You can, however, subscribe with no parameters at all. This may be the case where you're not interested in terminal events\n * and you also handled emissions internally by using operators (e.g. using `tap`).\n *\n * Whichever style of calling `subscribe` you use, in both cases it returns a Subscription object.\n * This object allows you to call `unsubscribe` on it, which in turn will stop the work that an Observable does and will clean\n * up all resources that an Observable used. 
Note that cancelling a subscription will not call `complete` callback\n * provided to `subscribe` function, which is reserved for a regular completion signal that comes from an Observable.\n *\n * Remember that callbacks provided to `subscribe` are not guaranteed to be called asynchronously.\n * It is an Observable itself that decides when these functions will be called. For example {@link of}\n * by default emits all its values synchronously. Always check documentation for how given Observable\n * will behave when subscribed and if its default behavior can be modified with a `scheduler`.\n *\n * #### Examples\n *\n * Subscribe with an {@link guide/observer Observer}\n *\n * ```ts\n * import { of } from 'rxjs';\n *\n * const sumObserver = {\n * sum: 0,\n * next(value) {\n * console.log('Adding: ' + value);\n * this.sum = this.sum + value;\n * },\n * error() {\n * // We actually could just remove this method,\n * // since we do not really care about errors right now.\n * },\n * complete() {\n * console.log('Sum equals: ' + this.sum);\n * }\n * };\n *\n * of(1, 2, 3) // Synchronously emits 1, 2, 3 and then completes.\n * .subscribe(sumObserver);\n *\n * // Logs:\n * // 'Adding: 1'\n * // 'Adding: 2'\n * // 'Adding: 3'\n * // 'Sum equals: 6'\n * ```\n *\n * Subscribe with functions ({@link deprecations/subscribe-arguments deprecated})\n *\n * ```ts\n * import { of } from 'rxjs'\n *\n * let sum = 0;\n *\n * of(1, 2, 3).subscribe(\n * value => {\n * console.log('Adding: ' + value);\n * sum = sum + value;\n * },\n * undefined,\n * () => console.log('Sum equals: ' + sum)\n * );\n *\n * // Logs:\n * // 'Adding: 1'\n * // 'Adding: 2'\n * // 'Adding: 3'\n * // 'Sum equals: 6'\n * ```\n *\n * Cancel a subscription\n *\n * ```ts\n * import { interval } from 'rxjs';\n *\n * const subscription = interval(1000).subscribe({\n * next(num) {\n * console.log(num)\n * },\n * complete() {\n * // Will not be called, even when cancelling subscription.\n * console.log('completed!');\n * 
}\n * });\n *\n * setTimeout(() => {\n * subscription.unsubscribe();\n * console.log('unsubscribed!');\n * }, 2500);\n *\n * // Logs:\n * // 0 after 1s\n * // 1 after 2s\n * // 'unsubscribed!' after 2.5s\n * ```\n *\n * @param {Observer|Function} observerOrNext (optional) Either an observer with methods to be called,\n * or the first of three possible handlers, which is the handler for each value emitted from the subscribed\n * Observable.\n * @param {Function} error (optional) A handler for a terminal event resulting from an error. If no error handler is provided,\n * the error will be thrown asynchronously as unhandled.\n * @param {Function} complete (optional) A handler for a terminal event resulting from successful completion.\n * @return {Subscription} a subscription reference to the registered handlers\n * @method subscribe\n */\n subscribe(\n observerOrNext?: Partial> | ((value: T) => void) | null,\n error?: ((error: any) => void) | null,\n complete?: (() => void) | null\n ): Subscription {\n const subscriber = isSubscriber(observerOrNext) ? observerOrNext : new SafeSubscriber(observerOrNext, error, complete);\n\n errorContext(() => {\n const { operator, source } = this;\n subscriber.add(\n operator\n ? // We're dealing with a subscription in the\n // operator chain to one of our lifted operators.\n operator.call(subscriber, source)\n : source\n ? // If `source` has a value, but `operator` does not, something that\n // had intimate knowledge of our API, like our `Subject`, must have\n // set it. 
We're going to just call `_subscribe` directly.\n this._subscribe(subscriber)\n : // In all other cases, we're likely wrapping a user-provided initializer\n // function, so we need to catch errors and handle them appropriately.\n this._trySubscribe(subscriber)\n );\n });\n\n return subscriber;\n }\n\n /** @internal */\n protected _trySubscribe(sink: Subscriber): TeardownLogic {\n try {\n return this._subscribe(sink);\n } catch (err) {\n // We don't need to return anything in this case,\n // because it's just going to try to `add()` to a subscription\n // above.\n sink.error(err);\n }\n }\n\n /**\n * Used as a NON-CANCELLABLE means of subscribing to an observable, for use with\n * APIs that expect promises, like `async/await`. You cannot unsubscribe from this.\n *\n * **WARNING**: Only use this with observables you *know* will complete. If the source\n * observable does not complete, you will end up with a promise that is hung up, and\n * potentially all of the state of an async function hanging out in memory. 
To avoid\n * this situation, look into adding something like {@link timeout}, {@link take},\n * {@link takeWhile}, or {@link takeUntil} amongst others.\n *\n * #### Example\n *\n * ```ts\n * import { interval, take } from 'rxjs';\n *\n * const source$ = interval(1000).pipe(take(4));\n *\n * async function getTotal() {\n * let total = 0;\n *\n * await source$.forEach(value => {\n * total += value;\n * console.log('observable -> ' + value);\n * });\n *\n * return total;\n * }\n *\n * getTotal().then(\n * total => console.log('Total: ' + total)\n * );\n *\n * // Expected:\n * // 'observable -> 0'\n * // 'observable -> 1'\n * // 'observable -> 2'\n * // 'observable -> 3'\n * // 'Total: 6'\n * ```\n *\n * @param next a handler for each value emitted by the observable\n * @return a promise that either resolves on observable completion or\n * rejects with the handled error\n */\n forEach(next: (value: T) => void): Promise;\n\n /**\n * @param next a handler for each value emitted by the observable\n * @param promiseCtor a constructor function used to instantiate the Promise\n * @return a promise that either resolves on observable completion or\n * rejects with the handled error\n * @deprecated Passing a Promise constructor will no longer be available\n * in upcoming versions of RxJS. This is because it adds weight to the library, for very\n * little benefit. If you need this functionality, it is recommended that you either\n * polyfill Promise, or you create an adapter to convert the returned native promise\n * to whatever promise implementation you wanted. 
Will be removed in v8.\n */\n forEach(next: (value: T) => void, promiseCtor: PromiseConstructorLike): Promise;\n\n forEach(next: (value: T) => void, promiseCtor?: PromiseConstructorLike): Promise {\n promiseCtor = getPromiseCtor(promiseCtor);\n\n return new promiseCtor((resolve, reject) => {\n const subscriber = new SafeSubscriber({\n next: (value) => {\n try {\n next(value);\n } catch (err) {\n reject(err);\n subscriber.unsubscribe();\n }\n },\n error: reject,\n complete: resolve,\n });\n this.subscribe(subscriber);\n }) as Promise;\n }\n\n /** @internal */\n protected _subscribe(subscriber: Subscriber): TeardownLogic {\n return this.source?.subscribe(subscriber);\n }\n\n /**\n * An interop point defined by the es7-observable spec https://github.com/zenparsing/es-observable\n * @method Symbol.observable\n * @return {Observable} this instance of the observable\n */\n [Symbol_observable]() {\n return this;\n }\n\n /* tslint:disable:max-line-length */\n pipe(): Observable;\n pipe(op1: OperatorFunction): Observable;\n pipe(op1: OperatorFunction, op2: OperatorFunction): Observable;\n pipe(op1: OperatorFunction, op2: OperatorFunction, op3: OperatorFunction): Observable;\n pipe(\n op1: OperatorFunction,\n op2: OperatorFunction,\n op3: OperatorFunction,\n op4: OperatorFunction\n ): Observable;\n pipe(\n op1: OperatorFunction,\n op2: OperatorFunction,\n op3: OperatorFunction,\n op4: OperatorFunction,\n op5: OperatorFunction\n ): Observable;\n pipe(\n op1: OperatorFunction,\n op2: OperatorFunction,\n op3: OperatorFunction,\n op4: OperatorFunction,\n op5: OperatorFunction,\n op6: OperatorFunction\n ): Observable;\n pipe(\n op1: OperatorFunction,\n op2: OperatorFunction,\n op3: OperatorFunction,\n op4: OperatorFunction,\n op5: OperatorFunction,\n op6: OperatorFunction,\n op7: OperatorFunction\n ): Observable;\n pipe(\n op1: OperatorFunction,\n op2: OperatorFunction,\n op3: OperatorFunction,\n op4: OperatorFunction,\n op5: OperatorFunction,\n op6: OperatorFunction,\n op7: 
OperatorFunction,\n op8: OperatorFunction\n ): Observable;\n pipe(\n op1: OperatorFunction,\n op2: OperatorFunction,\n op3: OperatorFunction,\n op4: OperatorFunction,\n op5: OperatorFunction,\n op6: OperatorFunction,\n op7: OperatorFunction,\n op8: OperatorFunction,\n op9: OperatorFunction\n ): Observable;\n pipe(\n op1: OperatorFunction,\n op2: OperatorFunction,\n op3: OperatorFunction,\n op4: OperatorFunction,\n op5: OperatorFunction,\n op6: OperatorFunction,\n op7: OperatorFunction,\n op8: OperatorFunction,\n op9: OperatorFunction,\n ...operations: OperatorFunction[]\n ): Observable;\n /* tslint:enable:max-line-length */\n\n /**\n * Used to stitch together functional operators into a chain.\n * @method pipe\n * @return {Observable} the Observable result of all of the operators having\n * been called in the order they were passed in.\n *\n * ## Example\n *\n * ```ts\n * import { interval, filter, map, scan } from 'rxjs';\n *\n * interval(1000)\n * .pipe(\n * filter(x => x % 2 === 0),\n * map(x => x + x),\n * scan((acc, x) => acc + x)\n * )\n * .subscribe(x => console.log(x));\n * ```\n */\n pipe(...operations: OperatorFunction[]): Observable {\n return pipeFromArray(operations)(this);\n }\n\n /* tslint:disable:max-line-length */\n /** @deprecated Replaced with {@link firstValueFrom} and {@link lastValueFrom}. Will be removed in v8. Details: https://rxjs.dev/deprecations/to-promise */\n toPromise(): Promise;\n /** @deprecated Replaced with {@link firstValueFrom} and {@link lastValueFrom}. Will be removed in v8. Details: https://rxjs.dev/deprecations/to-promise */\n toPromise(PromiseCtor: typeof Promise): Promise;\n /** @deprecated Replaced with {@link firstValueFrom} and {@link lastValueFrom}. Will be removed in v8. 
Details: https://rxjs.dev/deprecations/to-promise */\n toPromise(PromiseCtor: PromiseConstructorLike): Promise;\n /* tslint:enable:max-line-length */\n\n /**\n * Subscribe to this Observable and get a Promise resolving on\n * `complete` with the last emission (if any).\n *\n * **WARNING**: Only use this with observables you *know* will complete. If the source\n * observable does not complete, you will end up with a promise that is hung up, and\n * potentially all of the state of an async function hanging out in memory. To avoid\n * this situation, look into adding something like {@link timeout}, {@link take},\n * {@link takeWhile}, or {@link takeUntil} amongst others.\n *\n * @method toPromise\n * @param [promiseCtor] a constructor function used to instantiate\n * the Promise\n * @return A Promise that resolves with the last value emit, or\n * rejects on an error. If there were no emissions, Promise\n * resolves with undefined.\n * @deprecated Replaced with {@link firstValueFrom} and {@link lastValueFrom}. Will be removed in v8. Details: https://rxjs.dev/deprecations/to-promise\n */\n toPromise(promiseCtor?: PromiseConstructorLike): Promise {\n promiseCtor = getPromiseCtor(promiseCtor);\n\n return new promiseCtor((resolve, reject) => {\n let value: T | undefined;\n this.subscribe(\n (x: T) => (value = x),\n (err: any) => reject(err),\n () => resolve(value)\n );\n }) as Promise;\n }\n}\n\n/**\n * Decides between a passed promise constructor from consuming code,\n * A default configured promise constructor, and the native promise\n * constructor and returns it. If nothing can be found, it will throw\n * an error.\n * @param promiseCtor The optional promise constructor to passed by consuming code\n */\nfunction getPromiseCtor(promiseCtor: PromiseConstructorLike | undefined) {\n return promiseCtor ?? config.Promise ?? 
Promise;\n}\n\nfunction isObserver(value: any): value is Observer {\n return value && isFunction(value.next) && isFunction(value.error) && isFunction(value.complete);\n}\n\nfunction isSubscriber(value: any): value is Subscriber {\n return (value && value instanceof Subscriber) || (isObserver(value) && isSubscription(value));\n}\n", "import { Observable } from '../Observable';\nimport { Subscriber } from '../Subscriber';\nimport { OperatorFunction } from '../types';\nimport { isFunction } from './isFunction';\n\n/**\n * Used to determine if an object is an Observable with a lift function.\n */\nexport function hasLift(source: any): source is { lift: InstanceType['lift'] } {\n return isFunction(source?.lift);\n}\n\n/**\n * Creates an `OperatorFunction`. Used to define operators throughout the library in a concise way.\n * @param init The logic to connect the liftedSource to the subscriber at the moment of subscription.\n */\nexport function operate(\n init: (liftedSource: Observable, subscriber: Subscriber) => (() => void) | void\n): OperatorFunction {\n return (source: Observable) => {\n if (hasLift(source)) {\n return source.lift(function (this: Subscriber, liftedSource: Observable) {\n try {\n return init(liftedSource, this);\n } catch (err) {\n this.error(err);\n }\n });\n }\n throw new TypeError('Unable to lift unknown Observable type');\n };\n}\n", "import { Subscriber } from '../Subscriber';\n\n/**\n * Creates an instance of an `OperatorSubscriber`.\n * @param destination The downstream subscriber.\n * @param onNext Handles next values, only called if this subscriber is not stopped or closed. Any\n * error that occurs in this function is caught and sent to the `error` method of this subscriber.\n * @param onError Handles errors from the subscription, any errors that occur in this handler are caught\n * and send to the `destination` error handler.\n * @param onComplete Handles completion notification from the subscription. 
Any errors that occur in\n * this handler are sent to the `destination` error handler.\n * @param onFinalize Additional teardown logic here. This will only be called on teardown if the\n * subscriber itself is not already closed. This is called after all other teardown logic is executed.\n */\nexport function createOperatorSubscriber(\n destination: Subscriber,\n onNext?: (value: T) => void,\n onComplete?: () => void,\n onError?: (err: any) => void,\n onFinalize?: () => void\n): Subscriber {\n return new OperatorSubscriber(destination, onNext, onComplete, onError, onFinalize);\n}\n\n/**\n * A generic helper for allowing operators to be created with a Subscriber and\n * use closures to capture necessary state from the operator function itself.\n */\nexport class OperatorSubscriber extends Subscriber {\n /**\n * Creates an instance of an `OperatorSubscriber`.\n * @param destination The downstream subscriber.\n * @param onNext Handles next values, only called if this subscriber is not stopped or closed. Any\n * error that occurs in this function is caught and sent to the `error` method of this subscriber.\n * @param onError Handles errors from the subscription, any errors that occur in this handler are caught\n * and send to the `destination` error handler.\n * @param onComplete Handles completion notification from the subscription. Any errors that occur in\n * this handler are sent to the `destination` error handler.\n * @param onFinalize Additional finalization logic here. This will only be called on finalization if the\n * subscriber itself is not already closed. 
This is called after all other finalization logic is executed.\n * @param shouldUnsubscribe An optional check to see if an unsubscribe call should truly unsubscribe.\n * NOTE: This currently **ONLY** exists to support the strange behavior of {@link groupBy}, where unsubscription\n * to the resulting observable does not actually disconnect from the source if there are active subscriptions\n * to any grouped observable. (DO NOT EXPOSE OR USE EXTERNALLY!!!)\n */\n constructor(\n destination: Subscriber,\n onNext?: (value: T) => void,\n onComplete?: () => void,\n onError?: (err: any) => void,\n private onFinalize?: () => void,\n private shouldUnsubscribe?: () => boolean\n ) {\n // It's important - for performance reasons - that all of this class's\n // members are initialized and that they are always initialized in the same\n // order. This will ensure that all OperatorSubscriber instances have the\n // same hidden class in V8. This, in turn, will help keep the number of\n // hidden classes involved in property accesses within the base class as\n // low as possible. If the number of hidden classes involved exceeds four,\n // the property accesses will become megamorphic and performance penalties\n // will be incurred - i.e. inline caches won't be used.\n //\n // The reasons for ensuring all instances have the same hidden class are\n // further discussed in this blog post from Benedikt Meurer:\n // https://benediktmeurer.de/2018/03/23/impact-of-polymorphism-on-component-based-frameworks-like-react/\n super(destination);\n this._next = onNext\n ? function (this: OperatorSubscriber, value: T) {\n try {\n onNext(value);\n } catch (err) {\n destination.error(err);\n }\n }\n : super._next;\n this._error = onError\n ? 
function (this: OperatorSubscriber, err: any) {\n try {\n onError(err);\n } catch (err) {\n // Send any errors that occur down stream.\n destination.error(err);\n } finally {\n // Ensure finalization.\n this.unsubscribe();\n }\n }\n : super._error;\n this._complete = onComplete\n ? function (this: OperatorSubscriber) {\n try {\n onComplete();\n } catch (err) {\n // Send any errors that occur down stream.\n destination.error(err);\n } finally {\n // Ensure finalization.\n this.unsubscribe();\n }\n }\n : super._complete;\n }\n\n unsubscribe() {\n if (!this.shouldUnsubscribe || this.shouldUnsubscribe()) {\n const { closed } = this;\n super.unsubscribe();\n // Execute additional teardown if we have any and we didn't already do so.\n !closed && this.onFinalize?.();\n }\n }\n}\n", "import { Subscription } from '../Subscription';\n\ninterface AnimationFrameProvider {\n schedule(callback: FrameRequestCallback): Subscription;\n requestAnimationFrame: typeof requestAnimationFrame;\n cancelAnimationFrame: typeof cancelAnimationFrame;\n delegate:\n | {\n requestAnimationFrame: typeof requestAnimationFrame;\n cancelAnimationFrame: typeof cancelAnimationFrame;\n }\n | undefined;\n}\n\nexport const animationFrameProvider: AnimationFrameProvider = {\n // When accessing the delegate, use the variable rather than `this` so that\n // the functions can be called without being bound to the provider.\n schedule(callback) {\n let request = requestAnimationFrame;\n let cancel: typeof cancelAnimationFrame | undefined = cancelAnimationFrame;\n const { delegate } = animationFrameProvider;\n if (delegate) {\n request = delegate.requestAnimationFrame;\n cancel = delegate.cancelAnimationFrame;\n }\n const handle = request((timestamp) => {\n // Clear the cancel function. 
The request has been fulfilled, so\n // attempting to cancel the request upon unsubscription would be\n // pointless.\n cancel = undefined;\n callback(timestamp);\n });\n return new Subscription(() => cancel?.(handle));\n },\n requestAnimationFrame(...args) {\n const { delegate } = animationFrameProvider;\n return (delegate?.requestAnimationFrame || requestAnimationFrame)(...args);\n },\n cancelAnimationFrame(...args) {\n const { delegate } = animationFrameProvider;\n return (delegate?.cancelAnimationFrame || cancelAnimationFrame)(...args);\n },\n delegate: undefined,\n};\n", "import { createErrorClass } from './createErrorClass';\n\nexport interface ObjectUnsubscribedError extends Error {}\n\nexport interface ObjectUnsubscribedErrorCtor {\n /**\n * @deprecated Internal implementation detail. Do not construct error instances.\n * Cannot be tagged as internal: https://github.com/ReactiveX/rxjs/issues/6269\n */\n new (): ObjectUnsubscribedError;\n}\n\n/**\n * An error thrown when an action is invalid because the object has been\n * unsubscribed.\n *\n * @see {@link Subject}\n * @see {@link BehaviorSubject}\n *\n * @class ObjectUnsubscribedError\n */\nexport const ObjectUnsubscribedError: ObjectUnsubscribedErrorCtor = createErrorClass(\n (_super) =>\n function ObjectUnsubscribedErrorImpl(this: any) {\n _super(this);\n this.name = 'ObjectUnsubscribedError';\n this.message = 'object unsubscribed';\n }\n);\n", "import { Operator } from './Operator';\nimport { Observable } from './Observable';\nimport { Subscriber } from './Subscriber';\nimport { Subscription, EMPTY_SUBSCRIPTION } from './Subscription';\nimport { Observer, SubscriptionLike, TeardownLogic } from './types';\nimport { ObjectUnsubscribedError } from './util/ObjectUnsubscribedError';\nimport { arrRemove } from './util/arrRemove';\nimport { errorContext } from './util/errorContext';\n\n/**\n * A Subject is a special type of Observable that allows values to be\n * multicasted to many Observers. 
Subjects are like EventEmitters.\n *\n * Every Subject is an Observable and an Observer. You can subscribe to a\n * Subject, and you can call next to feed values as well as error and complete.\n */\nexport class Subject extends Observable implements SubscriptionLike {\n closed = false;\n\n private currentObservers: Observer[] | null = null;\n\n /** @deprecated Internal implementation detail, do not use directly. Will be made internal in v8. */\n observers: Observer[] = [];\n /** @deprecated Internal implementation detail, do not use directly. Will be made internal in v8. */\n isStopped = false;\n /** @deprecated Internal implementation detail, do not use directly. Will be made internal in v8. */\n hasError = false;\n /** @deprecated Internal implementation detail, do not use directly. Will be made internal in v8. */\n thrownError: any = null;\n\n /**\n * Creates a \"subject\" by basically gluing an observer to an observable.\n *\n * @nocollapse\n * @deprecated Recommended you do not use. Will be removed at some point in the future. Plans for replacement still under discussion.\n */\n static create: (...args: any[]) => any = (destination: Observer, source: Observable): AnonymousSubject => {\n return new AnonymousSubject(destination, source);\n };\n\n constructor() {\n // NOTE: This must be here to obscure Observable's constructor.\n super();\n }\n\n /** @deprecated Internal implementation detail, do not use directly. Will be made internal in v8. 
*/\n lift(operator: Operator): Observable {\n const subject = new AnonymousSubject(this, this);\n subject.operator = operator as any;\n return subject as any;\n }\n\n /** @internal */\n protected _throwIfClosed() {\n if (this.closed) {\n throw new ObjectUnsubscribedError();\n }\n }\n\n next(value: T) {\n errorContext(() => {\n this._throwIfClosed();\n if (!this.isStopped) {\n if (!this.currentObservers) {\n this.currentObservers = Array.from(this.observers);\n }\n for (const observer of this.currentObservers) {\n observer.next(value);\n }\n }\n });\n }\n\n error(err: any) {\n errorContext(() => {\n this._throwIfClosed();\n if (!this.isStopped) {\n this.hasError = this.isStopped = true;\n this.thrownError = err;\n const { observers } = this;\n while (observers.length) {\n observers.shift()!.error(err);\n }\n }\n });\n }\n\n complete() {\n errorContext(() => {\n this._throwIfClosed();\n if (!this.isStopped) {\n this.isStopped = true;\n const { observers } = this;\n while (observers.length) {\n observers.shift()!.complete();\n }\n }\n });\n }\n\n unsubscribe() {\n this.isStopped = this.closed = true;\n this.observers = this.currentObservers = null!;\n }\n\n get observed() {\n return this.observers?.length > 0;\n }\n\n /** @internal */\n protected _trySubscribe(subscriber: Subscriber): TeardownLogic {\n this._throwIfClosed();\n return super._trySubscribe(subscriber);\n }\n\n /** @internal */\n protected _subscribe(subscriber: Subscriber): Subscription {\n this._throwIfClosed();\n this._checkFinalizedStatuses(subscriber);\n return this._innerSubscribe(subscriber);\n }\n\n /** @internal */\n protected _innerSubscribe(subscriber: Subscriber) {\n const { hasError, isStopped, observers } = this;\n if (hasError || isStopped) {\n return EMPTY_SUBSCRIPTION;\n }\n this.currentObservers = null;\n observers.push(subscriber);\n return new Subscription(() => {\n this.currentObservers = null;\n arrRemove(observers, subscriber);\n });\n }\n\n /** @internal */\n protected 
_checkFinalizedStatuses(subscriber: Subscriber) {\n const { hasError, thrownError, isStopped } = this;\n if (hasError) {\n subscriber.error(thrownError);\n } else if (isStopped) {\n subscriber.complete();\n }\n }\n\n /**\n * Creates a new Observable with this Subject as the source. You can do this\n * to create custom Observer-side logic of the Subject and conceal it from\n * code that uses the Observable.\n * @return {Observable} Observable that the Subject casts to\n */\n asObservable(): Observable {\n const observable: any = new Observable();\n observable.source = this;\n return observable;\n }\n}\n\n/**\n * @class AnonymousSubject\n */\nexport class AnonymousSubject extends Subject {\n constructor(\n /** @deprecated Internal implementation detail, do not use directly. Will be made internal in v8. */\n public destination?: Observer,\n source?: Observable\n ) {\n super();\n this.source = source;\n }\n\n next(value: T) {\n this.destination?.next?.(value);\n }\n\n error(err: any) {\n this.destination?.error?.(err);\n }\n\n complete() {\n this.destination?.complete?.();\n }\n\n /** @internal */\n protected _subscribe(subscriber: Subscriber): Subscription {\n return this.source?.subscribe(subscriber) ?? 
EMPTY_SUBSCRIPTION;\n }\n}\n", "import { Subject } from './Subject';\nimport { Subscriber } from './Subscriber';\nimport { Subscription } from './Subscription';\n\n/**\n * A variant of Subject that requires an initial value and emits its current\n * value whenever it is subscribed to.\n *\n * @class BehaviorSubject\n */\nexport class BehaviorSubject extends Subject {\n constructor(private _value: T) {\n super();\n }\n\n get value(): T {\n return this.getValue();\n }\n\n /** @internal */\n protected _subscribe(subscriber: Subscriber): Subscription {\n const subscription = super._subscribe(subscriber);\n !subscription.closed && subscriber.next(this._value);\n return subscription;\n }\n\n getValue(): T {\n const { hasError, thrownError, _value } = this;\n if (hasError) {\n throw thrownError;\n }\n this._throwIfClosed();\n return _value;\n }\n\n next(value: T): void {\n super.next((this._value = value));\n }\n}\n", "import { TimestampProvider } from '../types';\n\ninterface DateTimestampProvider extends TimestampProvider {\n delegate: TimestampProvider | undefined;\n}\n\nexport const dateTimestampProvider: DateTimestampProvider = {\n now() {\n // Use the variable rather than `this` so that the function can be called\n // without being bound to the provider.\n return (dateTimestampProvider.delegate || Date).now();\n },\n delegate: undefined,\n};\n", "import { Subject } from './Subject';\nimport { TimestampProvider } from './types';\nimport { Subscriber } from './Subscriber';\nimport { Subscription } from './Subscription';\nimport { dateTimestampProvider } from './scheduler/dateTimestampProvider';\n\n/**\n * A variant of {@link Subject} that \"replays\" old values to new subscribers by emitting them when they first subscribe.\n *\n * `ReplaySubject` has an internal buffer that will store a specified number of values that it has observed. Like `Subject`,\n * `ReplaySubject` \"observes\" values by having them passed to its `next` method. 
When it observes a value, it will store that\n * value for a time determined by the configuration of the `ReplaySubject`, as passed to its constructor.\n *\n * When a new subscriber subscribes to the `ReplaySubject` instance, it will synchronously emit all values in its buffer in\n * a First-In-First-Out (FIFO) manner. The `ReplaySubject` will also complete, if it has observed completion; and it will\n * error if it has observed an error.\n *\n * There are two main configuration items to be concerned with:\n *\n * 1. `bufferSize` - This will determine how many items are stored in the buffer, defaults to infinite.\n * 2. `windowTime` - The amount of time to hold a value in the buffer before removing it from the buffer.\n *\n * Both configurations may exist simultaneously. So if you would like to buffer a maximum of 3 values, as long as the values\n * are less than 2 seconds old, you could do so with a `new ReplaySubject(3, 2000)`.\n *\n * ### Differences with BehaviorSubject\n *\n * `BehaviorSubject` is similar to `new ReplaySubject(1)`, with a couple of exceptions:\n *\n * 1. `BehaviorSubject` comes \"primed\" with a single value upon construction.\n * 2. `ReplaySubject` will replay values, even after observing an error, where `BehaviorSubject` will not.\n *\n * @see {@link Subject}\n * @see {@link BehaviorSubject}\n * @see {@link shareReplay}\n */\nexport class ReplaySubject extends Subject {\n private _buffer: (T | number)[] = [];\n private _infiniteTimeWindow = true;\n\n /**\n * @param bufferSize The size of the buffer to replay on subscription\n * @param windowTime The amount of time the buffered items will stay buffered\n * @param timestampProvider An object with a `now()` method that provides the current timestamp. 
This is used to\n * calculate the amount of time something has been buffered.\n */\n constructor(\n private _bufferSize = Infinity,\n private _windowTime = Infinity,\n private _timestampProvider: TimestampProvider = dateTimestampProvider\n ) {\n super();\n this._infiniteTimeWindow = _windowTime === Infinity;\n this._bufferSize = Math.max(1, _bufferSize);\n this._windowTime = Math.max(1, _windowTime);\n }\n\n next(value: T): void {\n const { isStopped, _buffer, _infiniteTimeWindow, _timestampProvider, _windowTime } = this;\n if (!isStopped) {\n _buffer.push(value);\n !_infiniteTimeWindow && _buffer.push(_timestampProvider.now() + _windowTime);\n }\n this._trimBuffer();\n super.next(value);\n }\n\n /** @internal */\n protected _subscribe(subscriber: Subscriber): Subscription {\n this._throwIfClosed();\n this._trimBuffer();\n\n const subscription = this._innerSubscribe(subscriber);\n\n const { _infiniteTimeWindow, _buffer } = this;\n // We use a copy here, so reentrant code does not mutate our array while we're\n // emitting it to a new subscriber.\n const copy = _buffer.slice();\n for (let i = 0; i < copy.length && !subscriber.closed; i += _infiniteTimeWindow ? 1 : 2) {\n subscriber.next(copy[i] as T);\n }\n\n this._checkFinalizedStatuses(subscriber);\n\n return subscription;\n }\n\n private _trimBuffer() {\n const { _bufferSize, _timestampProvider, _buffer, _infiniteTimeWindow } = this;\n // If we don't have an infinite buffer size, and we're over the length,\n // use splice to truncate the old buffer values off. Note that we have to\n // double the size for instances where we're not using an infinite time window\n // because we're storing the values and the timestamps in the same array.\n const adjustedBufferSize = (_infiniteTimeWindow ? 
1 : 2) * _bufferSize;\n _bufferSize < Infinity && adjustedBufferSize < _buffer.length && _buffer.splice(0, _buffer.length - adjustedBufferSize);\n\n // Now, if we're not in an infinite time window, remove all values where the time is\n // older than what is allowed.\n if (!_infiniteTimeWindow) {\n const now = _timestampProvider.now();\n let last = 0;\n // Search the array for the first timestamp that isn't expired and\n // truncate the buffer up to that point.\n for (let i = 1; i < _buffer.length && (_buffer[i] as number) <= now; i += 2) {\n last = i;\n }\n last && _buffer.splice(0, last + 1);\n }\n }\n}\n", "import { Scheduler } from '../Scheduler';\nimport { Subscription } from '../Subscription';\nimport { SchedulerAction } from '../types';\n\n/**\n * A unit of work to be executed in a `scheduler`. An action is typically\n * created from within a {@link SchedulerLike} and an RxJS user does not need to concern\n * themselves about creating and manipulating an Action.\n *\n * ```ts\n * class Action extends Subscription {\n * new (scheduler: Scheduler, work: (state?: T) => void);\n * schedule(state?: T, delay: number = 0): Subscription;\n * }\n * ```\n *\n * @class Action\n */\nexport class Action extends Subscription {\n constructor(scheduler: Scheduler, work: (this: SchedulerAction, state?: T) => void) {\n super();\n }\n /**\n * Schedules this action on its parent {@link SchedulerLike} for execution. May be passed\n * some context object, `state`. 
May happen at some point in the future,\n * according to the `delay` parameter, if specified.\n * @param {T} [state] Some contextual data that the `work` function uses when\n * called by the Scheduler.\n * @param {number} [delay] Time to wait before executing the work, where the\n * time unit is implicit and defined by the Scheduler.\n * @return {void}\n */\n public schedule(state?: T, delay: number = 0): Subscription {\n return this;\n }\n}\n", "import type { TimerHandle } from './timerHandle';\ntype SetIntervalFunction = (handler: () => void, timeout?: number, ...args: any[]) => TimerHandle;\ntype ClearIntervalFunction = (handle: TimerHandle) => void;\n\ninterface IntervalProvider {\n setInterval: SetIntervalFunction;\n clearInterval: ClearIntervalFunction;\n delegate:\n | {\n setInterval: SetIntervalFunction;\n clearInterval: ClearIntervalFunction;\n }\n | undefined;\n}\n\nexport const intervalProvider: IntervalProvider = {\n // When accessing the delegate, use the variable rather than `this` so that\n // the functions can be called without being bound to the provider.\n setInterval(handler: () => void, timeout?: number, ...args) {\n const { delegate } = intervalProvider;\n if (delegate?.setInterval) {\n return delegate.setInterval(handler, timeout, ...args);\n }\n return setInterval(handler, timeout, ...args);\n },\n clearInterval(handle) {\n const { delegate } = intervalProvider;\n return (delegate?.clearInterval || clearInterval)(handle as any);\n },\n delegate: undefined,\n};\n", "import { Action } from './Action';\nimport { SchedulerAction } from '../types';\nimport { Subscription } from '../Subscription';\nimport { AsyncScheduler } from './AsyncScheduler';\nimport { intervalProvider } from './intervalProvider';\nimport { arrRemove } from '../util/arrRemove';\nimport { TimerHandle } from './timerHandle';\n\nexport class AsyncAction extends Action {\n public id: TimerHandle | undefined;\n public state?: T;\n // @ts-ignore: Property has no initializer and is 
not definitely assigned\n public delay: number;\n protected pending: boolean = false;\n\n constructor(protected scheduler: AsyncScheduler, protected work: (this: SchedulerAction, state?: T) => void) {\n super(scheduler, work);\n }\n\n public schedule(state?: T, delay: number = 0): Subscription {\n if (this.closed) {\n return this;\n }\n\n // Always replace the current state with the new state.\n this.state = state;\n\n const id = this.id;\n const scheduler = this.scheduler;\n\n //\n // Important implementation note:\n //\n // Actions only execute once by default, unless rescheduled from within the\n // scheduled callback. This allows us to implement single and repeat\n // actions via the same code path, without adding API surface area, as well\n // as mimic traditional recursion but across asynchronous boundaries.\n //\n // However, JS runtimes and timers distinguish between intervals achieved by\n // serial `setTimeout` calls vs. a single `setInterval` call. An interval of\n // serial `setTimeout` calls can be individually delayed, which delays\n // scheduling the next `setTimeout`, and so on. `setInterval` attempts to\n // guarantee the interval callback will be invoked more precisely to the\n // interval period, regardless of load.\n //\n // Therefore, we use `setInterval` to schedule single and repeat actions.\n // If the action reschedules itself with the same delay, the interval is not\n // canceled. If the action doesn't reschedule, or reschedules with a\n // different delay, the interval will be canceled after scheduled callback\n // execution.\n //\n if (id != null) {\n this.id = this.recycleAsyncId(scheduler, id, delay);\n }\n\n // Set the pending flag indicating that this action has been scheduled, or\n // has recursively rescheduled itself.\n this.pending = true;\n\n this.delay = delay;\n // If this action has already an async Id, don't request a new one.\n this.id = this.id ?? 
this.requestAsyncId(scheduler, this.id, delay);\n\n return this;\n }\n\n protected requestAsyncId(scheduler: AsyncScheduler, _id?: TimerHandle, delay: number = 0): TimerHandle {\n return intervalProvider.setInterval(scheduler.flush.bind(scheduler, this), delay);\n }\n\n protected recycleAsyncId(_scheduler: AsyncScheduler, id?: TimerHandle, delay: number | null = 0): TimerHandle | undefined {\n // If this action is rescheduled with the same delay time, don't clear the interval id.\n if (delay != null && this.delay === delay && this.pending === false) {\n return id;\n }\n // Otherwise, if the action's delay time is different from the current delay,\n // or the action has been rescheduled before it's executed, clear the interval id\n if (id != null) {\n intervalProvider.clearInterval(id);\n }\n\n return undefined;\n }\n\n /**\n * Immediately executes this action and the `work` it contains.\n * @return {any}\n */\n public execute(state: T, delay: number): any {\n if (this.closed) {\n return new Error('executing a cancelled action');\n }\n\n this.pending = false;\n const error = this._execute(state, delay);\n if (error) {\n return error;\n } else if (this.pending === false && this.id != null) {\n // Dequeue if the action didn't reschedule itself. Don't call\n // unsubscribe(), because the action could reschedule later.\n // For example:\n // ```\n // scheduler.schedule(function doWork(counter) {\n // /* ... I'm a busy worker bee ... 
*/\n // var originalAction = this;\n // /* wait 100ms before rescheduling the action */\n // setTimeout(function () {\n // originalAction.schedule(counter + 1);\n // }, 100);\n // }, 1000);\n // ```\n this.id = this.recycleAsyncId(this.scheduler, this.id, null);\n }\n }\n\n protected _execute(state: T, _delay: number): any {\n let errored: boolean = false;\n let errorValue: any;\n try {\n this.work(state);\n } catch (e) {\n errored = true;\n // HACK: Since code elsewhere is relying on the \"truthiness\" of the\n // return here, we can't have it return \"\" or 0 or false.\n // TODO: Clean this up when we refactor schedulers mid-version-8 or so.\n errorValue = e ? e : new Error('Scheduled action threw falsy error');\n }\n if (errored) {\n this.unsubscribe();\n return errorValue;\n }\n }\n\n unsubscribe() {\n if (!this.closed) {\n const { id, scheduler } = this;\n const { actions } = scheduler;\n\n this.work = this.state = this.scheduler = null!;\n this.pending = false;\n\n arrRemove(actions, this);\n if (id != null) {\n this.id = this.recycleAsyncId(scheduler, id, null);\n }\n\n this.delay = null!;\n super.unsubscribe();\n }\n }\n}\n", "import { Action } from './scheduler/Action';\nimport { Subscription } from './Subscription';\nimport { SchedulerLike, SchedulerAction } from './types';\nimport { dateTimestampProvider } from './scheduler/dateTimestampProvider';\n\n/**\n * An execution context and a data structure to order tasks and schedule their\n * execution. Provides a notion of (potentially virtual) time, through the\n * `now()` getter method.\n *\n * Each unit of work in a Scheduler is called an `Action`.\n *\n * ```ts\n * class Scheduler {\n * now(): number;\n * schedule(work, delay?, state?): Subscription;\n * }\n * ```\n *\n * @class Scheduler\n * @deprecated Scheduler is an internal implementation detail of RxJS, and\n * should not be used directly. Rather, create your own class and implement\n * {@link SchedulerLike}. 
Will be made internal in v8.\n */\nexport class Scheduler implements SchedulerLike {\n public static now: () => number = dateTimestampProvider.now;\n\n constructor(private schedulerActionCtor: typeof Action, now: () => number = Scheduler.now) {\n this.now = now;\n }\n\n /**\n * A getter method that returns a number representing the current time\n * (at the time this function was called) according to the scheduler's own\n * internal clock.\n * @return {number} A number that represents the current time. May or may not\n * have a relation to wall-clock time. May or may not refer to a time unit\n * (e.g. milliseconds).\n */\n public now: () => number;\n\n /**\n * Schedules a function, `work`, for execution. May happen at some point in\n * the future, according to the `delay` parameter, if specified. May be passed\n * some context object, `state`, which will be passed to the `work` function.\n *\n * The given arguments will be processed an stored as an Action object in a\n * queue of actions.\n *\n * @param {function(state: ?T): ?Subscription} work A function representing a\n * task, or some unit of work to be executed by the Scheduler.\n * @param {number} [delay] Time to wait before executing the work, where the\n * time unit is implicit and defined by the Scheduler itself.\n * @param {T} [state] Some contextual data that the `work` function uses when\n * called by the Scheduler.\n * @return {Subscription} A subscription in order to be able to unsubscribe\n * the scheduled work.\n */\n public schedule(work: (this: SchedulerAction, state?: T) => void, delay: number = 0, state?: T): Subscription {\n return new this.schedulerActionCtor(this, work).schedule(state, delay);\n }\n}\n", "import { Scheduler } from '../Scheduler';\nimport { Action } from './Action';\nimport { AsyncAction } from './AsyncAction';\nimport { TimerHandle } from './timerHandle';\n\nexport class AsyncScheduler extends Scheduler {\n public actions: Array> = [];\n /**\n * A flag to indicate whether the 
Scheduler is currently executing a batch of\n * queued actions.\n * @type {boolean}\n * @internal\n */\n public _active: boolean = false;\n /**\n * An internal ID used to track the latest asynchronous task such as those\n * coming from `setTimeout`, `setInterval`, `requestAnimationFrame`, and\n * others.\n * @type {any}\n * @internal\n */\n public _scheduled: TimerHandle | undefined;\n\n constructor(SchedulerAction: typeof Action, now: () => number = Scheduler.now) {\n super(SchedulerAction, now);\n }\n\n public flush(action: AsyncAction): void {\n const { actions } = this;\n\n if (this._active) {\n actions.push(action);\n return;\n }\n\n let error: any;\n this._active = true;\n\n do {\n if ((error = action.execute(action.state, action.delay))) {\n break;\n }\n } while ((action = actions.shift()!)); // exhaust the scheduler queue\n\n this._active = false;\n\n if (error) {\n while ((action = actions.shift()!)) {\n action.unsubscribe();\n }\n throw error;\n }\n }\n}\n", "import { AsyncAction } from './AsyncAction';\nimport { AsyncScheduler } from './AsyncScheduler';\n\n/**\n *\n * Async Scheduler\n *\n * Schedule task as if you used setTimeout(task, duration)\n *\n * `async` scheduler schedules tasks asynchronously, by putting them on the JavaScript\n * event loop queue. 
It is best used to delay tasks in time or to schedule tasks repeating\n * in intervals.\n *\n * If you just want to \"defer\" task, that is to perform it right after currently\n * executing synchronous code ends (commonly achieved by `setTimeout(deferredTask, 0)`),\n * better choice will be the {@link asapScheduler} scheduler.\n *\n * ## Examples\n * Use async scheduler to delay task\n * ```ts\n * import { asyncScheduler } from 'rxjs';\n *\n * const task = () => console.log('it works!');\n *\n * asyncScheduler.schedule(task, 2000);\n *\n * // After 2 seconds logs:\n * // \"it works!\"\n * ```\n *\n * Use async scheduler to repeat task in intervals\n * ```ts\n * import { asyncScheduler } from 'rxjs';\n *\n * function task(state) {\n * console.log(state);\n * this.schedule(state + 1, 1000); // `this` references currently executing Action,\n * // which we reschedule with new state and delay\n * }\n *\n * asyncScheduler.schedule(task, 3000, 0);\n *\n * // Logs:\n * // 0 after 3s\n * // 1 after 4s\n * // 2 after 5s\n * // 3 after 6s\n * ```\n */\n\nexport const asyncScheduler = new AsyncScheduler(AsyncAction);\n\n/**\n * @deprecated Renamed to {@link asyncScheduler}. Will be removed in v8.\n */\nexport const async = asyncScheduler;\n", "import { AsyncAction } from './AsyncAction';\nimport { Subscription } from '../Subscription';\nimport { QueueScheduler } from './QueueScheduler';\nimport { SchedulerAction } from '../types';\nimport { TimerHandle } from './timerHandle';\n\nexport class QueueAction extends AsyncAction {\n constructor(protected scheduler: QueueScheduler, protected work: (this: SchedulerAction, state?: T) => void) {\n super(scheduler, work);\n }\n\n public schedule(state?: T, delay: number = 0): Subscription {\n if (delay > 0) {\n return super.schedule(state, delay);\n }\n this.delay = delay;\n this.state = state;\n this.scheduler.flush(this);\n return this;\n }\n\n public execute(state: T, delay: number): any {\n return delay > 0 || this.closed ? 
super.execute(state, delay) : this._execute(state, delay);\n }\n\n protected requestAsyncId(scheduler: QueueScheduler, id?: TimerHandle, delay: number = 0): TimerHandle {\n // If delay exists and is greater than 0, or if the delay is null (the\n // action wasn't rescheduled) but was originally scheduled as an async\n // action, then recycle as an async action.\n\n if ((delay != null && delay > 0) || (delay == null && this.delay > 0)) {\n return super.requestAsyncId(scheduler, id, delay);\n }\n\n // Otherwise flush the scheduler starting with this action.\n scheduler.flush(this);\n\n // HACK: In the past, this was returning `void`. However, `void` isn't a valid\n // `TimerHandle`, and generally the return value here isn't really used. So the\n // compromise is to return `0` which is both \"falsy\" and a valid `TimerHandle`,\n // as opposed to refactoring every other instanceo of `requestAsyncId`.\n return 0;\n }\n}\n", "import { AsyncScheduler } from './AsyncScheduler';\n\nexport class QueueScheduler extends AsyncScheduler {\n}\n", "import { QueueAction } from './QueueAction';\nimport { QueueScheduler } from './QueueScheduler';\n\n/**\n *\n * Queue Scheduler\n *\n * Put every next task on a queue, instead of executing it immediately\n *\n * `queue` scheduler, when used with delay, behaves the same as {@link asyncScheduler} scheduler.\n *\n * When used without delay, it schedules given task synchronously - executes it right when\n * it is scheduled. 
However when called recursively, that is when inside the scheduled task,\n * another task is scheduled with queue scheduler, instead of executing immediately as well,\n * that task will be put on a queue and wait for current one to finish.\n *\n * This means that when you execute task with `queue` scheduler, you are sure it will end\n * before any other task scheduled with that scheduler will start.\n *\n * ## Examples\n * Schedule recursively first, then do something\n * ```ts\n * import { queueScheduler } from 'rxjs';\n *\n * queueScheduler.schedule(() => {\n * queueScheduler.schedule(() => console.log('second')); // will not happen now, but will be put on a queue\n *\n * console.log('first');\n * });\n *\n * // Logs:\n * // \"first\"\n * // \"second\"\n * ```\n *\n * Reschedule itself recursively\n * ```ts\n * import { queueScheduler } from 'rxjs';\n *\n * queueScheduler.schedule(function(state) {\n * if (state !== 0) {\n * console.log('before', state);\n * this.schedule(state - 1); // `this` references currently executing Action,\n * // which we reschedule with new state\n * console.log('after', state);\n * }\n * }, 0, 3);\n *\n * // In scheduler that runs recursively, you would expect:\n * // \"before\", 3\n * // \"before\", 2\n * // \"before\", 1\n * // \"after\", 1\n * // \"after\", 2\n * // \"after\", 3\n *\n * // But with queue it logs:\n * // \"before\", 3\n * // \"after\", 3\n * // \"before\", 2\n * // \"after\", 2\n * // \"before\", 1\n * // \"after\", 1\n * ```\n */\n\nexport const queueScheduler = new QueueScheduler(QueueAction);\n\n/**\n * @deprecated Renamed to {@link queueScheduler}. 
Will be removed in v8.\n */\nexport const queue = queueScheduler;\n", "import { AsyncAction } from './AsyncAction';\nimport { AnimationFrameScheduler } from './AnimationFrameScheduler';\nimport { SchedulerAction } from '../types';\nimport { animationFrameProvider } from './animationFrameProvider';\nimport { TimerHandle } from './timerHandle';\n\nexport class AnimationFrameAction extends AsyncAction {\n constructor(protected scheduler: AnimationFrameScheduler, protected work: (this: SchedulerAction, state?: T) => void) {\n super(scheduler, work);\n }\n\n protected requestAsyncId(scheduler: AnimationFrameScheduler, id?: TimerHandle, delay: number = 0): TimerHandle {\n // If delay is greater than 0, request as an async action.\n if (delay !== null && delay > 0) {\n return super.requestAsyncId(scheduler, id, delay);\n }\n // Push the action to the end of the scheduler queue.\n scheduler.actions.push(this);\n // If an animation frame has already been requested, don't request another\n // one. If an animation frame hasn't been requested yet, request one. Return\n // the current animation frame request id.\n return scheduler._scheduled || (scheduler._scheduled = animationFrameProvider.requestAnimationFrame(() => scheduler.flush(undefined)));\n }\n\n protected recycleAsyncId(scheduler: AnimationFrameScheduler, id?: TimerHandle, delay: number = 0): TimerHandle | undefined {\n // If delay exists and is greater than 0, or if the delay is null (the\n // action wasn't rescheduled) but was originally scheduled as an async\n // action, then recycle as an async action.\n if (delay != null ? 
delay > 0 : this.delay > 0) {\n return super.recycleAsyncId(scheduler, id, delay);\n }\n // If the scheduler queue has no remaining actions with the same async id,\n // cancel the requested animation frame and set the scheduled flag to\n // undefined so the next AnimationFrameAction will request its own.\n const { actions } = scheduler;\n if (id != null && actions[actions.length - 1]?.id !== id) {\n animationFrameProvider.cancelAnimationFrame(id as number);\n scheduler._scheduled = undefined;\n }\n // Return undefined so the action knows to request a new async id if it's rescheduled.\n return undefined;\n }\n}\n", "import { AsyncAction } from './AsyncAction';\nimport { AsyncScheduler } from './AsyncScheduler';\n\nexport class AnimationFrameScheduler extends AsyncScheduler {\n public flush(action?: AsyncAction): void {\n this._active = true;\n // The async id that effects a call to flush is stored in _scheduled.\n // Before executing an action, it's necessary to check the action's async\n // id to determine whether it's supposed to be executed in the current\n // flush.\n // Previous implementations of this method used a count to determine this,\n // but that was unsound, as actions that are unsubscribed - i.e. 
cancelled -\n // are removed from the actions array and that can shift actions that are\n // scheduled to be executed in a subsequent flush into positions at which\n // they are executed within the current flush.\n const flushId = this._scheduled;\n this._scheduled = undefined;\n\n const { actions } = this;\n let error: any;\n action = action || actions.shift()!;\n\n do {\n if ((error = action.execute(action.state, action.delay))) {\n break;\n }\n } while ((action = actions[0]) && action.id === flushId && actions.shift());\n\n this._active = false;\n\n if (error) {\n while ((action = actions[0]) && action.id === flushId && actions.shift()) {\n action.unsubscribe();\n }\n throw error;\n }\n }\n}\n", "import { AnimationFrameAction } from './AnimationFrameAction';\nimport { AnimationFrameScheduler } from './AnimationFrameScheduler';\n\n/**\n *\n * Animation Frame Scheduler\n *\n * Perform task when `window.requestAnimationFrame` would fire\n *\n * When `animationFrame` scheduler is used with delay, it will fall back to {@link asyncScheduler} scheduler\n * behaviour.\n *\n * Without delay, `animationFrame` scheduler can be used to create smooth browser animations.\n * It makes sure scheduled task will happen just before next browser content repaint,\n * thus performing animations as efficiently as possible.\n *\n * ## Example\n * Schedule div height animation\n * ```ts\n * // html:
\n * import { animationFrameScheduler } from 'rxjs';\n *\n * const div = document.querySelector('div');\n *\n * animationFrameScheduler.schedule(function(height) {\n * div.style.height = height + \"px\";\n *\n * this.schedule(height + 1); // `this` references currently executing Action,\n * // which we reschedule with new state\n * }, 0, 0);\n *\n * // You will see a div element growing in height\n * ```\n */\n\nexport const animationFrameScheduler = new AnimationFrameScheduler(AnimationFrameAction);\n\n/**\n * @deprecated Renamed to {@link animationFrameScheduler}. Will be removed in v8.\n */\nexport const animationFrame = animationFrameScheduler;\n", "import { Observable } from '../Observable';\nimport { SchedulerLike } from '../types';\n\n/**\n * A simple Observable that emits no items to the Observer and immediately\n * emits a complete notification.\n *\n * Just emits 'complete', and nothing else.\n *\n * ![](empty.png)\n *\n * A simple Observable that only emits the complete notification. It can be used\n * for composing with other Observables, such as in a {@link mergeMap}.\n *\n * ## Examples\n *\n * Log complete notification\n *\n * ```ts\n * import { EMPTY } from 'rxjs';\n *\n * EMPTY.subscribe({\n * next: () => console.log('Next'),\n * complete: () => console.log('Complete!')\n * });\n *\n * // Outputs\n * // Complete!\n * ```\n *\n * Emit the number 7, then complete\n *\n * ```ts\n * import { EMPTY, startWith } from 'rxjs';\n *\n * const result = EMPTY.pipe(startWith(7));\n * result.subscribe(x => console.log(x));\n *\n * // Outputs\n * // 7\n * ```\n *\n * Map and flatten only odd numbers to the sequence `'a'`, `'b'`, `'c'`\n *\n * ```ts\n * import { interval, mergeMap, of, EMPTY } from 'rxjs';\n *\n * const interval$ = interval(1000);\n * const result = interval$.pipe(\n * mergeMap(x => x % 2 === 1 ? 
of('a', 'b', 'c') : EMPTY),\n * );\n * result.subscribe(x => console.log(x));\n *\n * // Results in the following to the console:\n * // x is equal to the count on the interval, e.g. (0, 1, 2, 3, ...)\n * // x will occur every 1000ms\n * // if x % 2 is equal to 1, print a, b, c (each on its own)\n * // if x % 2 is not equal to 1, nothing will be output\n * ```\n *\n * @see {@link Observable}\n * @see {@link NEVER}\n * @see {@link of}\n * @see {@link throwError}\n */\nexport const EMPTY = new Observable((subscriber) => subscriber.complete());\n\n/**\n * @param scheduler A {@link SchedulerLike} to use for scheduling\n * the emission of the complete notification.\n * @deprecated Replaced with the {@link EMPTY} constant or {@link scheduled} (e.g. `scheduled([], scheduler)`). Will be removed in v8.\n */\nexport function empty(scheduler?: SchedulerLike) {\n return scheduler ? emptyScheduled(scheduler) : EMPTY;\n}\n\nfunction emptyScheduled(scheduler: SchedulerLike) {\n return new Observable((subscriber) => scheduler.schedule(() => subscriber.complete()));\n}\n", "import { SchedulerLike } from '../types';\nimport { isFunction } from './isFunction';\n\nexport function isScheduler(value: any): value is SchedulerLike {\n return value && isFunction(value.schedule);\n}\n", "import { SchedulerLike } from '../types';\nimport { isFunction } from './isFunction';\nimport { isScheduler } from './isScheduler';\n\nfunction last(arr: T[]): T | undefined {\n return arr[arr.length - 1];\n}\n\nexport function popResultSelector(args: any[]): ((...args: unknown[]) => unknown) | undefined {\n return isFunction(last(args)) ? args.pop() : undefined;\n}\n\nexport function popScheduler(args: any[]): SchedulerLike | undefined {\n return isScheduler(last(args)) ? args.pop() : undefined;\n}\n\nexport function popNumber(args: any[], defaultValue: number): number {\n return typeof last(args) === 'number' ? args.pop()! 
: defaultValue;\n}\n", "export const isArrayLike = ((x: any): x is ArrayLike => x && typeof x.length === 'number' && typeof x !== 'function');", "import { isFunction } from \"./isFunction\";\n\n/**\n * Tests to see if the object is \"thennable\".\n * @param value the object to test\n */\nexport function isPromise(value: any): value is PromiseLike {\n return isFunction(value?.then);\n}\n", "import { InteropObservable } from '../types';\nimport { observable as Symbol_observable } from '../symbol/observable';\nimport { isFunction } from './isFunction';\n\n/** Identifies an input as being Observable (but not necessary an Rx Observable) */\nexport function isInteropObservable(input: any): input is InteropObservable {\n return isFunction(input[Symbol_observable]);\n}\n", "import { isFunction } from './isFunction';\n\nexport function isAsyncIterable(obj: any): obj is AsyncIterable {\n return Symbol.asyncIterator && isFunction(obj?.[Symbol.asyncIterator]);\n}\n", "/**\n * Creates the TypeError to throw if an invalid object is passed to `from` or `scheduled`.\n * @param input The object that was passed.\n */\nexport function createInvalidObservableTypeError(input: any) {\n // TODO: We should create error codes that can be looked up, so this can be less verbose.\n return new TypeError(\n `You provided ${\n input !== null && typeof input === 'object' ? 'an invalid object' : `'${input}'`\n } where a stream was expected. 
You can provide an Observable, Promise, ReadableStream, Array, AsyncIterable, or Iterable.`\n );\n}\n", "export function getSymbolIterator(): symbol {\n if (typeof Symbol !== 'function' || !Symbol.iterator) {\n return '@@iterator' as any;\n }\n\n return Symbol.iterator;\n}\n\nexport const iterator = getSymbolIterator();\n", "import { iterator as Symbol_iterator } from '../symbol/iterator';\nimport { isFunction } from './isFunction';\n\n/** Identifies an input as being an Iterable */\nexport function isIterable(input: any): input is Iterable {\n return isFunction(input?.[Symbol_iterator]);\n}\n", "import { ReadableStreamLike } from '../types';\nimport { isFunction } from './isFunction';\n\nexport async function* readableStreamLikeToAsyncGenerator(readableStream: ReadableStreamLike): AsyncGenerator {\n const reader = readableStream.getReader();\n try {\n while (true) {\n const { value, done } = await reader.read();\n if (done) {\n return;\n }\n yield value!;\n }\n } finally {\n reader.releaseLock();\n }\n}\n\nexport function isReadableStreamLike(obj: any): obj is ReadableStreamLike {\n // We don't want to use instanceof checks because they would return\n // false for instances from another Realm, like an