// This file defines tests for various GGML ops and backends.
// For the forward pass it asserts that the results of multiple backends computing the same GGML ops are consistent.
// For the backwards pass it asserts that the gradients from backpropagation are consistent
// with the gradients obtained via the method of finite differences ("grad" mode, this is optional).
// It is also possible to check the performance ("perf" mode).
//
// This file has three sections: section 1 does general setup, section 2 defines the GGML ops to be tested,
// and section 3 defines which tests to run.
// Quick start for adding a new GGML op: Go to section 2 and create a struct that inherits from test_case,
// then go to section 3 and add an instantiation of your struct.
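// (For illustration: the registration step in section 3 is typically a single line along the lines of
//  test_cases.emplace_back(new test_example()); -- the exact container and pattern are defined there.)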
// ##############################
// ## Section 1: General Setup ##
// ##############################
#include <ggml.h>
#include <ggml-alloc.h>
#include <ggml-backend.h>
#include <algorithm>
#include <array>
#include <cfloat>
#include <cstdint>
#include <cstring>
#include <cinttypes>
#include <functional>
#include <memory>
#include <random>
#include <stdio.h>
#include <stdlib.h>
#include <string>
#include <thread>
#include <vector>
static void init_tensor_uniform(ggml_tensor * tensor, float min = -1.0f, float max = 1.0f) {
// static RNG initialization (revisit if n_threads stops being constant)
static const size_t n_threads = std::thread::hardware_concurrency();
static std::vector<std::default_random_engine> generators = []() {
std::random_device rd;
std::vector<std::default_random_engine> vec;
vec.reserve(n_threads);
//for (size_t i = 0; i < n_threads; i++) { vec.emplace_back(1234 + i); } // fixed seed
for (size_t i = 0; i < n_threads; i++) { vec.emplace_back(rd()); }
return vec;
}();
size_t size = ggml_nelements(tensor);
std::vector<float> data(size);
auto init_thread = [&](size_t ith, size_t start, size_t end) {
std::uniform_real_distribution<float> distribution(min, max);
for (size_t i = start; i < end; i++) {
data[i] = distribution(generators[ith]);
}
};
std::vector<std::thread> threads;
threads.reserve(n_threads);
for (size_t i = 0; i < n_threads; i++) {
size_t start = i*size/n_threads;
size_t end = (i+1)*size/n_threads;
threads.emplace_back(init_thread, i, start, end);
}
for (auto & t : threads) {
t.join();
}
#if 0
const char * val_str = getenv("GGML_TEST_EPS");
float val = 1e-9f;
if (val_str != nullptr) {
val = std::stof(val_str);
printf("GGML_TEST_EPS=%e\n", val);
}
// test quantization with very small values that may result in nan scales due to division by zero
if (ggml_is_quantized(tensor->type)) {
for (int i = 0; i < 256; i++) {
data[i] = val;
}
}
#endif
if (tensor->type == GGML_TYPE_F32 || tensor->type == GGML_TYPE_I32) {
ggml_backend_tensor_set(tensor, data.data(), 0, size * sizeof(float));
} else if (ggml_is_quantized(tensor->type) || tensor->type == GGML_TYPE_F16 || tensor->type == GGML_TYPE_BF16) {
GGML_ASSERT(size % ggml_blck_size(tensor->type) == 0);
std::vector<uint8_t> dataq(ggml_row_size(tensor->type, size));
std::vector<float> imatrix(tensor->ne[0], 1.0f); // dummy importance matrix
const float * im = imatrix.data();
if (!ggml_quantize_requires_imatrix(tensor->type)) {
// when the imatrix is optional, we want to test both quantization with and without imatrix
// use one of the random numbers to decide
if (data[0] > 0.5f*(min + max)) {
im = nullptr;
}
}
ggml_quantize_chunk(tensor->type, data.data(), dataq.data(), 0, size/tensor->ne[0], tensor->ne[0], im);
GGML_ASSERT(ggml_validate_row_data(tensor->type, dataq.data(), dataq.size()));
// TODO: other cases
//#pragma omp parallel for
//for (int i = 0; i < tensor->ne[1]; i++) {
// ggml_quantize_chunk(tensor->type, data.data(), dataq.data(),
// i * tensor->ne[0], 1, tensor->ne[0], im);
//}
ggml_backend_tensor_set(tensor, dataq.data(), 0, dataq.size());
} else if (tensor->type == GGML_TYPE_I8 || tensor->type == GGML_TYPE_I16 || tensor->type == GGML_TYPE_I32) {
// Note: this copies the raw float bytes into the integer tensor, so the resulting integers are essentially arbitrary.
ggml_backend_tensor_set(tensor, data.data(), 0, ggml_nbytes(tensor));
} else {
GGML_ABORT("fatal error");
}
}
static std::vector<float> tensor_to_float(const ggml_tensor * t) {
std::vector<float> tv;
tv.reserve(ggml_nelements(t));
std::vector<uint8_t> buf(ggml_nbytes(t));
ggml_backend_tensor_get(t, buf.data(), 0, ggml_nbytes(t));
ggml_type_traits_t tt = ggml_internal_get_type_traits(t->type);
size_t bs = ggml_blck_size(t->type);
std::vector<float> vq(ggml_blck_size(t->type));
bool quantized = ggml_is_quantized(t->type);
// access elements by index to avoid gaps in views
for (int64_t i3 = 0; i3 < t->ne[3]; i3++) {
for (int64_t i2 = 0; i2 < t->ne[2]; i2++) {
for (int64_t i1 = 0; i1 < t->ne[1]; i1++) {
for (int64_t i0 = 0; i0 < t->ne[0]; i0 += bs) {
size_t i = i3*t->nb[3] + i2*t->nb[2] + i1*t->nb[1] + i0/bs*t->nb[0];
if (t->type == GGML_TYPE_F16) {
tv.push_back(ggml_fp16_to_fp32(*(ggml_fp16_t*)&buf[i]));
} else if (t->type == GGML_TYPE_BF16) {
tv.push_back(ggml_bf16_to_fp32(*(ggml_bf16_t*)&buf[i]));
} else if (t->type == GGML_TYPE_F32) {
tv.push_back(*(float *) &buf[i]);
} else if (t->type == GGML_TYPE_I32) {
tv.push_back((float)*(int32_t *) &buf[i]);
} else if (t->type == GGML_TYPE_I16) {
tv.push_back((float)*(int16_t *) &buf[i]);
} else if (t->type == GGML_TYPE_I8) {
tv.push_back((float)*(int8_t *) &buf[i]);
} else if (quantized) {
tt.to_float(&buf[i], vq.data(), bs);
tv.insert(tv.end(), vq.begin(), vq.end());
} else {
GGML_ABORT("fatal error");
}
}
}
}
}
return tv;
}
/*
static double cosine_similarity(const float * v1, const float * v2, size_t n) {
double dot = 0.0;
double mag1 = 0.0;
double mag2 = 0.0;
for (size_t i = 0; i < n; i++) {
if (std::isnan(v1[i]) || std::isnan(v2[i])) {
return -1.0f;
}
if (std::isinf(v1[i]) && std::isinf(v2[i])) {
continue;
}
dot += v1[i]*v2[i];
mag1 += v1[i]*v1[i];
mag2 += v2[i]*v2[i];
}
return dot/sqrt(mag1*mag2);
}
static float distance(const float * v1, const float * v2, size_t n) {
double d = 0.0;
for (size_t i = 0; i < n; i++) {
if (std::isnan(v1[i]) || std::isnan(v2[i])) {
return INFINITY;
}
if (std::isinf(v1[i]) && std::isinf(v2[i])) {
continue;
}
d += (v1[i] - v2[i])*(v1[i] - v2[i]);
}
return sqrt(d);
}
static float vec_len(const float * v, size_t n) {
double d = 0.0;
for (size_t i = 0; i < n; i++) {
if (std::isnan(v[i])) {
return INFINITY;
}
if (std::isinf(v[i])) {
continue;
}
d += v[i]*v[i];
}
return sqrt(d);
}
*/
// normalized mean squared error = mse(a, b) / mse(a, 0)
static double nmse(const float * a, const float * b, size_t n) {
double mse_a_b = 0.0;
double mse_a_0 = 0.0;
for (size_t i = 0; i < n; i++) {
float a_i = a[i];
float b_i = b[i];
mse_a_b += (a_i - b_i) * (a_i - b_i);
mse_a_0 += a_i * a_i;
}
return mse_a_b / mse_a_0;
}
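// Worked example (illustrative values): for a = {1.0f, 2.0f} and b = {1.1f, 2.2f},
// the numerator is 0.1^2 + 0.2^2 = 0.05 and the denominator is 1^2 + 2^2 = 5,
// so nmse(a, b, 2) = 0.05 / 5 = 0.01 (the 1/n factors of the two MSEs cancel).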
// maximum absolute asymmetry between a and b
// asymmetry: (a - b) / (a + b)
// This is more stable than relative error if one of the values fluctuates towards zero.
// n: number of values to compare.
// expected_vals: optional vector of expected values for a. If expected_vals is not empty, filter out all comparisons where
// a does not match any of the expected values. Needed for noncontinuous gradients where the numerical calculation can fail.
static double mean_abs_asymm(const float * a, const float * b, const size_t n, const std::vector<float> & expected_vals) {
double sum = 0.0f;
size_t nvalid = 0;
for (size_t i = 0; i < n; i++) {
if (!expected_vals.empty()) {
bool matches_any = false;
for (const float & ev : expected_vals) {
if (fabsf(a[i] - ev) < 1e-3f) {
matches_any = true;
break;
}
}
if (!matches_any) {
continue;
}
}
const float asymm = (a[i] - b[i]) / (a[i] + b[i]);
sum += fabsf(asymm);
nvalid++;
}
return sum/nvalid;
}
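// Worked example (illustrative values): a single pair a = 2.0f, b = 1.0f gives
// asymm = (2 - 1) / (2 + 1) = 1/3 ~ 0.33, whereas the relative error (a - b) / b = 1.0
// and would blow up as b approaches zero -- hence the asymmetry measure above.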
// utils for printing the variables of the test cases
#define VAR_TO_STR(x) (#x "=" + var_to_str(x))
template<typename T>
static std::string var_to_str(const T & x) {
return std::to_string(x);
}
template<typename T, size_t N>
static std::string var_to_str(const T (&x)[N]) {
std::string s = "[";
for (size_t i = 0; i < N; i++) {
if (i > 0) {
s += ",";
}
s += var_to_str(x[i]);
}
s += "]";
return s;
}
template<typename T, size_t N>
static std::string var_to_str(const std::array<T, N> & x) {
std::string s = "[";
for (size_t i = 0; i < N; i++) {
if (i > 0) {
s += ",";
}
s += var_to_str(x[i]);
}
s += "]";
return s;
}
//static std::string var_to_str(ggml_unary_op unary_op) {
// return ggml_unary_op_name(unary_op);
//}
static std::string var_to_str(ggml_type type) {
return ggml_type_name(type);
}
static std::string var_to_str(ggml_op_pool pool) {
switch (pool) {
case GGML_OP_POOL_AVG: return "avg";
case GGML_OP_POOL_MAX: return "max";
default: return std::to_string(pool);
}
}
#define VARS_TO_STR1(a) VAR_TO_STR(a)
#define VARS_TO_STR2(a, b) VAR_TO_STR(a) + "," + VAR_TO_STR(b)
#define VARS_TO_STR3(a, b, c) VAR_TO_STR(a) + "," + VARS_TO_STR2(b, c)
#define VARS_TO_STR4(a, b, c, d) VAR_TO_STR(a) + "," + VARS_TO_STR3(b, c, d)
#define VARS_TO_STR5(a, b, c, d, e) VAR_TO_STR(a) + "," + VARS_TO_STR4(b, c, d, e)
#define VARS_TO_STR6(a, b, c, d, e, f) VAR_TO_STR(a) + "," + VARS_TO_STR5(b, c, d, e, f)
#define VARS_TO_STR7(a, b, c, d, e, f, g) VAR_TO_STR(a) + "," + VARS_TO_STR6(b, c, d, e, f, g)
#define VARS_TO_STR8(a, b, c, d, e, f, g, h) VAR_TO_STR(a) + "," + VARS_TO_STR7(b, c, d, e, f, g, h)
#define VARS_TO_STR9(a, b, c, d, e, f, g, h, i) VAR_TO_STR(a) + "," + VARS_TO_STR8(b, c, d, e, f, g, h, i)
#define VARS_TO_STR10(a, b, c, d, e, f, g, h, i, j) VAR_TO_STR(a) + "," + VARS_TO_STR9(b, c, d, e, f, g, h, i, j)
#define VARS_TO_STR11(a, b, c, d, e, f, g, h, i, j, k) VAR_TO_STR(a) + "," + VARS_TO_STR10(b, c, d, e, f, g, h, i, j, k)
#define VARS_TO_STR12(a, b, c, d, e, f, g, h, i, j, k, l) VAR_TO_STR(a) + "," + VARS_TO_STR11(b, c, d, e, f, g, h, i, j, k, l)
#ifdef GGML_USE_SYCL
static bool inline _isinf(float f) {
return (*(uint32_t *)&f & 0x7fffffff) == 0x7f800000;
}
#else
static bool inline _isinf(float f) { return std::isinf(f); }
#endif
// accept FLT_MAX as infinity
static bool isinf_or_max(float f) {
return _isinf(f) || f == FLT_MAX || f == -FLT_MAX;
}
static bool ggml_is_view_op(enum ggml_op op) {
return op == GGML_OP_VIEW || op == GGML_OP_RESHAPE || op == GGML_OP_PERMUTE || op == GGML_OP_TRANSPOSE;
}
enum test_mode {
MODE_TEST,
MODE_PERF,
MODE_GRAD,
};
struct test_case {
virtual ~test_case() {}
virtual std::string op_desc(ggml_tensor * t) {
return ggml_op_desc(t);
}
virtual std::string vars() {
return "";
}
virtual ggml_tensor * build_graph(ggml_context * ctx) = 0;
virtual double max_nmse_err() {
return 1e-7;
}
virtual double max_maa_err() {
return 1e-4;
}
virtual float grad_eps(){
return 1e-1f;
}
// If false, estimate gradient with 2 points, neglects 3rd order derivative and higher.
// If true, estimate gradient with 4 points, neglects 5th order derivative and higher.
virtual bool grad_precise(){
return false;
}
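// For reference, the two estimates used in eval_grad below are (sketch of the formulas, eps = grad_eps()):
//   2-point:  f'(x) ~= (f(x + eps) - f(x - eps)) / (2*eps)
//   4-point:  f'(x) ~= (-f(x + eps) + 8*f(x + eps/2) - 8*f(x - eps/2) + f(x - eps)) / (6*eps)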
// Skip gradient checks if total number of gradients to be checked is larger than this (to speed up the tests).
virtual int64_t grad_nmax() {
return 10000;
}
// No effect if empty.
// If not empty, skip all gradient checks where the numerical result does not match any of the values.
// Needed for dealing with noncontinuous gradients (e.g. ReLU) where estimation using finite differences is unreliable.
virtual std::vector<float> grad_expect() {
return {};
}
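// Hypothetical example: for a ReLU-like op whose analytic gradient can only be 0 or 1,
// an override could return {0.0f, 1.0f} so that finite-difference estimates taken across
// the kink at x = 0 are filtered out.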
virtual void initialize_tensors(ggml_context * ctx) {
for (ggml_tensor * t = ggml_get_first_tensor(ctx); t != nullptr; t = ggml_get_next_tensor(ctx, t)) {
init_tensor_uniform(t);
}
}
virtual size_t op_size(ggml_tensor * t) {
size_t size = ggml_nbytes(t);
// add source tensors
for (int i = 0; i < GGML_MAX_SRC; i++) {
if (t->src[i] != NULL) {
size += ggml_nbytes(t->src[i]);
}
}
return size;
}
ggml_cgraph * gf = nullptr;
ggml_cgraph * gb = nullptr;
static const int sentinel_size = 1024;
test_mode mode;
std::vector<ggml_tensor *> sentinels;
void add_sentinel(ggml_context * ctx) {
if (mode == MODE_PERF || mode == MODE_GRAD) {
return;
}
ggml_tensor * sentinel = ::ggml_new_tensor_1d(ctx, GGML_TYPE_F32, sentinel_size);
ggml_format_name(sentinel, "sent_%zu", sentinels.size());
sentinels.push_back(sentinel);
}
// hijack ggml_new_tensor to add sentinels after each tensor to check for overflows in the backend
ggml_tensor * ggml_new_tensor(ggml_context * ctx, ggml_type type, int n_dims, const int64_t * ne) {
ggml_tensor * t = ::ggml_new_tensor(ctx, type, n_dims, ne);
add_sentinel(ctx);
return t;
}
ggml_tensor * ggml_new_tensor_1d(ggml_context * ctx, ggml_type type, int64_t ne0) {
ggml_tensor * t = ::ggml_new_tensor_1d(ctx, type, ne0);
add_sentinel(ctx);
return t;
}
ggml_tensor * ggml_new_tensor_2d(ggml_context * ctx, ggml_type type, int64_t ne0, int64_t ne1) {
ggml_tensor * t = ::ggml_new_tensor_2d(ctx, type, ne0, ne1);
add_sentinel(ctx);
return t;
}
ggml_tensor * ggml_new_tensor_3d(ggml_context * ctx, ggml_type type, int64_t ne0, int64_t ne1, int64_t ne2) {
ggml_tensor * t = ::ggml_new_tensor_3d(ctx, type, ne0, ne1, ne2);
add_sentinel(ctx);
return t;
}
ggml_tensor * ggml_new_tensor_4d(ggml_context * ctx, ggml_type type, int64_t ne0, int64_t ne1, int64_t ne2, int64_t ne3) {
ggml_tensor * t = ::ggml_new_tensor_4d(ctx, type, ne0, ne1, ne2, ne3);
add_sentinel(ctx);
return t;
}
bool eval(ggml_backend_t backend1, ggml_backend_t backend2, const char * op_name) {
mode = MODE_TEST;
ggml_init_params params = {
/* .mem_size = */ ggml_tensor_overhead()*128 + ggml_graph_overhead(),
/* .mem_base = */ NULL,
/* .no_alloc = */ true,
};
ggml_context * ctx = ggml_init(params);
GGML_ASSERT(ctx);
gf = ggml_new_graph(ctx);
// pre-graph sentinel
add_sentinel(ctx);
ggml_tensor * out = build_graph(ctx);
if (op_name != nullptr && op_desc(out) != op_name) {
//printf(" %s: skipping\n", op_desc(out).c_str());
ggml_free(ctx);
return true;
}
printf(" %s(%s): ", op_desc(out).c_str(), vars().c_str());
fflush(stdout);
// check if the backends support the ops
bool supported = true;
for (ggml_backend_t backend : {backend1, backend2}) {
for (ggml_tensor * t = ggml_get_first_tensor(ctx); t != NULL; t = ggml_get_next_tensor(ctx, t)) {
if (!ggml_backend_supports_op(backend, t)) {
printf("not supported [%s] ", ggml_backend_name(backend));
supported = false;
break;
}
}
}
if (!supported) {
printf("\n");
ggml_free(ctx);
return true;
}
// post-graph sentinel
add_sentinel(ctx);
// allocate
ggml_backend_buffer_t buf = ggml_backend_alloc_ctx_tensors(ctx, backend1);
if (buf == NULL) {
printf("failed to allocate tensors [%s] ", ggml_backend_name(backend1));
ggml_free(ctx);
return false;
}
// build graph
ggml_build_forward_expand(gf, out);
// add sentinels as graph nodes so that they are checked in the callback
for (ggml_tensor * sentinel : sentinels) {
ggml_graph_add_node(gf, sentinel);
}
// randomize tensors
initialize_tensors(ctx);
// compare
struct callback_userdata {
bool ok;
double max_err;
ggml_backend_t backend1;
ggml_backend_t backend2;
};
callback_userdata ud {
true,
max_nmse_err(),
backend1,
backend2
};
auto callback = [](int index, ggml_tensor * t1, ggml_tensor * t2, void * user_data) -> bool {
callback_userdata * ud = (callback_userdata *) user_data;
const char * bn1 = ggml_backend_name(ud->backend1);
const char * bn2 = ggml_backend_name(ud->backend2);
if (t1->op == GGML_OP_NONE) {
// sentinels must be unchanged
std::vector<uint8_t> t1_data(ggml_nbytes(t1));
std::vector<uint8_t> t2_data(ggml_nbytes(t2));
ggml_backend_tensor_get(t1, t1_data.data(), 0, ggml_nbytes(t1));
ggml_backend_tensor_get(t2, t2_data.data(), 0, ggml_nbytes(t2));
if (memcmp(t1_data.data(), t2_data.data(), ggml_nbytes(t1)) != 0) {
printf("sentinel mismatch: %s ", t1->name);
ud->ok = false;
return true;
}
}
std::vector<float> f1 = tensor_to_float(t1);
std::vector<float> f2 = tensor_to_float(t2);
for (size_t i = 0; i < f1.size(); i++) {
// check for nans
if (std::isnan(f1[i]) || std::isnan(f2[i])) {
printf("[%s] NaN at index %zu (%s=%f %s=%f) ", ggml_op_desc(t1), i, bn1, f1[i], bn2, f2[i]);
ud->ok = false;
return true;
}
// check for infs: both must be inf of the same sign, or both must be finite
if (isinf_or_max(f1[i]) || isinf_or_max(f2[i])) {
if (isinf_or_max(f1[i]) && isinf_or_max(f2[i])) {
if (std::signbit(f1[i]) != std::signbit(f2[i])) {
printf("[%s] inf sign mismatch: %s=%f %s=%f ", ggml_op_desc(t1), bn1, f1[i], bn2, f2[i]);
ud->ok = false;
return true;
}
} else {
printf("[%s] inf mismatch: %s=%f %s=%f ", ggml_op_desc(t1), bn1, f1[i], bn2, f2[i]);
ud->ok = false;
return true;
}
}
}
double err = nmse(f1.data(), f2.data(), f1.size());
if (err > ud->max_err) {
printf("[%s] NMSE = %.9f > %.9f ", ggml_op_desc(t1), err, ud->max_err);
//for (int i = 0; i < (int) f1.size(); i++) {
// printf("%5d %9.6f %9.6f, diff = %9.6f\n", i, f1[i], f2[i], f1[i] - f2[i]);
//}
//printf("\n");
//exit(1);
ud->ok = false;
}
return true;
GGML_UNUSED(index);
};
const bool cmp_ok = ggml_backend_compare_graph_backend(backend1, backend2, gf, callback, &ud);
if (!cmp_ok) {
printf("compare failed ");
}
ggml_backend_buffer_free(buf);
ggml_free(ctx);
if (ud.ok && cmp_ok) {
printf("\033[1;32mOK\033[0m\n");
return true;
}
printf("\033[1;31mFAIL\033[0m\n");
return false;
}
bool eval_perf(ggml_backend_t backend, const char * op_name) {
mode = MODE_PERF;
static const size_t graph_nodes = 8192;
ggml_init_params params = {
/* .mem_size = */ ggml_tensor_overhead()*128 + ggml_graph_overhead_custom(graph_nodes, false),
/* .mem_base = */ NULL,
/* .no_alloc = */ true,
};
ggml_context * ctx = ggml_init(params);
GGML_ASSERT(ctx);
ggml_tensor * out = build_graph(ctx);
if (op_name != nullptr && op_desc(out) != op_name) {
//printf(" %s: skipping\n", op_desc(out).c_str());
ggml_free(ctx);
return true;
}
int len = printf(" %s(%s): ", op_desc(out).c_str(), vars().c_str());
fflush(stdout);
// check if backends support op
if (!ggml_backend_supports_op(backend, out)) {
printf("not supported\n");
ggml_free(ctx);
return true;
}
// align while also leaving some margin for variations in parameters
int align = 20;
int last = (len + align - 1) / align * align;
if (last - len < 5) {
last += align;
}
last = std::max(last, 60);
printf("%*s", last - len, "");
// allocate
ggml_backend_buffer_t buf = ggml_backend_alloc_ctx_tensors(ctx, backend);
if (buf == NULL) {
printf("failed to allocate tensors\n");
ggml_free(ctx);
return false;
}
// randomize tensors
initialize_tensors(ctx);
// build graph
ggml_cgraph * gf = ggml_new_graph_custom(ctx, graph_nodes, false);
ggml_build_forward_expand(gf, out);
// warmup run
ggml_backend_graph_compute(backend, gf);
// duplicate the op
size_t target_size = ggml_backend_is_cpu(backend) ? 1ULL << 33 : 1ULL << 35; // 8 GB CPU, 32 GB GPU
int n_runs = std::min((size_t) ggml_graph_size(gf) - ggml_graph_n_nodes(gf), target_size / op_size(out)) + 1;
for (int i = 1; i < n_runs; i++) {
ggml_graph_add_node(gf, out);
}
// calculate memory
size_t mem = n_runs * op_size(out);
auto tensor_op_size = [](ggml_tensor * t) {
size_t size = ggml_nbytes(t);
// add source tensors
for (int i = 0; i < GGML_MAX_SRC; i++) {
if (t->src[i] != NULL) {
size += ggml_nbytes(t->src[i]);
}
}
return size;
};
for (int i = 0; i < ggml_graph_n_nodes(gf); ++i) {
if (ggml_is_view_op(ggml_graph_node(gf, i)->op) || ggml_graph_node(gf, i) == out) {
continue;
}
mem += tensor_op_size(ggml_graph_node(gf, i));
}
// run
ggml_backend_synchronize(backend);
int64_t start_time = ggml_time_us();
ggml_backend_graph_compute(backend, gf);
ggml_backend_synchronize(backend);
int64_t end_time = ggml_time_us();
double time_us = end_time - start_time;
printf(" %5d runs - %8.2f us/run - %8zu kB/run - \033[1;34m%7.2f GB/s\033[0m\n",
n_runs,
time_us / n_runs,
op_size(out) / 1024,
mem / (time_us/1e6) / 1024.0 / 1024.0 / 1024.0);
ggml_backend_buffer_free(buf);
ggml_free(ctx);
return true;
}
bool eval_grad(ggml_backend_t backend, const char * op_name) {
mode = MODE_GRAD;
const std::vector<float> expect = grad_expect();
ggml_init_params params = {
/* .mem_size = */ ggml_tensor_overhead()*128 + 2*ggml_graph_overhead_custom(GGML_DEFAULT_GRAPH_SIZE, true),
/* .mem_base = */ NULL,
/* .no_alloc = */ true,
};
ggml_context * ctx = ggml_init(params);
GGML_ASSERT(ctx);
gf = ggml_new_graph_custom(ctx, GGML_DEFAULT_GRAPH_SIZE, true);
gb = ggml_new_graph_custom(ctx, GGML_DEFAULT_GRAPH_SIZE, true);
ggml_tensor * out = build_graph(ctx);
if (op_name != nullptr && op_desc(out) != op_name) {
//printf(" %s: skipping\n", op_desc(out).c_str());
ggml_free(ctx);
return true;
}
printf(" %s(%s): ", op_desc(out).c_str(), vars().c_str());
fflush(stdout);
if (out->grad == nullptr) {
printf("backwards pass not supported \n");
ggml_free(ctx);
return true;
}
if (out->type != GGML_TYPE_F32) {
ggml_free(ctx);
printf("not supported [%s->type != FP32]\n", out->name);
return true;
}
// check if the backend supports the ops
bool supported = true;
for (ggml_tensor * t = ggml_get_first_tensor(ctx); t != NULL; t = ggml_get_next_tensor(ctx, t)) {
if (!ggml_backend_supports_op(backend, t)) {
printf("not supported [%s] ", ggml_backend_name(backend));
supported = false;
break;
}
if ((t->flags & GGML_TENSOR_FLAG_PARAM) && t->type != GGML_TYPE_F32) {
printf("not supported [%s->type != FP32] ", t->name);
supported = false;
break;
}
}
if (!supported) {
printf("\n");
ggml_free(ctx);
return true;
}
int64_t ngrads = 0;
for (ggml_tensor * t = ggml_get_first_tensor(ctx); t != NULL; t = ggml_get_next_tensor(ctx, t)) {
if (t->flags & GGML_TENSOR_FLAG_PARAM) {
ngrads += ggml_nelements(t);
}
}
if (ngrads > grad_nmax()) {
printf("skipping large tensors for speed \n");
ggml_free(ctx);
return true;
}
if (!ggml_is_scalar(out)) {
out = ggml_sum(ctx, out);
ggml_set_name(out, "sum_of_out");
}
ggml_build_forward_expand(gf, out);
ggml_graph_cpy(gf, gb);
ggml_build_backward_expand(ctx, gf, gb, false);
if (expect.size() != 1 || expect[0] != 0.0f) {
GGML_ASSERT(ggml_graph_n_nodes(gb) > ggml_graph_n_nodes(gf));
for (ggml_tensor * t = ggml_get_first_tensor(ctx); t != NULL; t = ggml_get_next_tensor(ctx, t)) {
GGML_ASSERT(!(t->flags & GGML_TENSOR_FLAG_PARAM) || t->grad->op != GGML_OP_NONE);
}
}
// TODO: refactor so that this check is only needed once
for (ggml_tensor * t = ggml_get_first_tensor(ctx); t != NULL; t = ggml_get_next_tensor(ctx, t)) {
if (!ggml_backend_supports_op(backend, t)) {
printf("not supported [%s] ", ggml_backend_name(backend));
supported = false;
break;
}
if ((t->flags & GGML_TENSOR_FLAG_PARAM) && t->type != GGML_TYPE_F32) {
printf("not supported [%s->type != FP32] ", t->name);
supported = false;
break;
}
}
if (!supported) {
printf("\n");
ggml_free(ctx);
return true;
}
// allocate
ggml_backend_buffer_t buf = ggml_backend_alloc_ctx_tensors(ctx, backend);
if (buf == NULL) {
printf("failed to allocate tensors [%s] ", ggml_backend_name(backend));
ggml_free(ctx);
return false;
}
// randomize tensors
initialize_tensors(ctx);
for (struct ggml_tensor * t = ggml_get_first_tensor(ctx); t != nullptr; t = ggml_get_next_tensor(ctx, t)) {
if (!t->grad) {
continue;
}
std::vector<float> tmp(ggml_nelements(t->grad));
ggml_backend_tensor_set(t->grad, tmp.data(), 0, ggml_nbytes(t->grad));
}
// build graphs
const float onef = 1.0f;
ggml_backend_graph_compute(backend, gf);
ggml_backend_tensor_set(out->grad, &onef, 0, ggml_nbytes(out->grad));
ggml_backend_graph_compute(backend, gb);
bool ok = true;
for (struct ggml_tensor * t = ggml_get_first_tensor(ctx); t != nullptr; t = ggml_get_next_tensor(ctx, t)) {
if (!(t->flags & GGML_TENSOR_FLAG_PARAM)) {
continue;
}
const char * bn = ggml_backend_name(backend);
const int64_t ne = ggml_nelements(t);
std::vector<float> ga = tensor_to_float(t->grad);
for (int64_t i = 0; i < ne; ++i) { // gradient algebraic
// check for nans
if (!std::isfinite(ga[i])) {
printf("[%s] nonfinite gradient at index %" PRId64 " (%s=%f) ", ggml_op_desc(t), i, bn, ga[i]);
ok = false;
break;
}
}
if (!ok) {
break;
}
std::vector<float> gn(ne); // gradient numeric
GGML_ASSERT(ga.size() == gn.size());
std::vector<float> x0 = tensor_to_float(t); // original t data
GGML_ASSERT(ggml_is_scalar(out));
GGML_ASSERT(out->type == GGML_TYPE_F32);
const float eps = grad_eps();
for (int64_t i = 0; i < ne; ++i) {
const float xiu = x0[i] + 1.0f*eps; // x, index i, up
const float xiuh = x0[i] + 0.5f*eps; // x, index i, up half
const float xidh = x0[i] - 0.5f*eps; // x, index i, down half
const float xid = x0[i] - 1.0f*eps; // x, index i, down
float fu, fuh, fdh, fd; // output values for xiu, xiuh, xidh, xid
ggml_backend_tensor_set(t, &xiu, i*sizeof(float), sizeof(float));
ggml_backend_graph_compute(backend, gf);
ggml_backend_tensor_get(out, &fu, 0, ggml_nbytes(out));
ggml_backend_tensor_set(t, &xid, i*sizeof(float), sizeof(float));
ggml_backend_graph_compute(backend, gf);
ggml_backend_tensor_get(out, &fd, 0, ggml_nbytes(out));
if (grad_precise()) {
ggml_backend_tensor_set(t, &xiuh, i*sizeof(float), sizeof(float));
ggml_backend_graph_compute(backend, gf);
ggml_backend_tensor_get(out, &fuh, 0, ggml_nbytes(out));
ggml_backend_tensor_set(t, &xidh, i*sizeof(float), sizeof(float));
ggml_backend_graph_compute(backend, gf);
ggml_backend_tensor_get(out, &fdh, 0, ggml_nbytes(out));
gn[i] = (8.0*(double)fuh + (double)fd - (8.0*(double)fdh + (double)fu)) / (6.0*(double)eps);
} else {
gn[i] = (fu - fd) / (2.0f*eps);
}
ggml_backend_tensor_set(t, x0.data(), 0, ggml_nbytes(t));
}
const double err = mean_abs_asymm(gn.data(), ga.data(), gn.size(), expect);
if (err > max_maa_err()) {
printf("[%s] MAA = %.9f > %.9f ", ggml_op_desc(t), err, max_maa_err());
ok = false;
break;
}
if (!ok) {
break;
}
}
if (!ok) {
printf("compare failed ");
}
ggml_backend_buffer_free(buf);
ggml_free(ctx);
if (ok) {
printf("\033[1;32mOK\033[0m\n");
return true;
}
printf("\033[1;31mFAIL\033[0m\n");
return false;
}
};
// ####################################
// ## Section 2: GGML Op Definitions ##
// ####################################
// The following is an example showing the bare minimum for creating a test for a GGML op.
// GGML_OP_EXAMPLE
struct test_example : public test_case {
// Always define these 2 or variants thereof:
const ggml_type type; // The type of the input tensors.
const std::array<int64_t, 4> ne; // The shape of the input tensors.
// For some ops it's necessary to define multiple types or shapes for the inputs.
// Or they may need additional parameters.
// Put all parameters needed to fully define the test into one of the VARS_TO_STR macros.
// In most cases these are just the properties of the struct that you defined above.
// This is needed for info prints.
std::string vars() override {
return VARS_TO_STR2(type, ne);
}
// Define a constructor for the struct.
// In most cases it will be sufficient to have the same arguments as the struct has properties
// and just use initializer lists.
test_example(ggml_type type = GGML_TYPE_F32,
std::array<int64_t, 4> ne = {10, 5, 4, 3})
: type(type), ne(ne) {}
// Define how a simple GGML compute graph can be constructed for the new GGML op.
ggml_tensor * build_graph(ggml_context * ctx) override {
// Step 1: create input tensors that don't depend on any other tensors:
ggml_tensor * a = ggml_new_tensor(ctx, type, 4, ne.data());
ggml_set_name(a, "a"); // Setting names is optional but it's useful for debugging.
ggml_tensor * b = ggml_new_tensor(ctx, type, 4, ne.data());
ggml_set_name(b, "b");
// Step 2: use the op that you want to test in the GGML compute graph.
ggml_tensor * out = ggml_add(ctx, a, b); // For this example we're just doing a simple addition.
ggml_set_name(out, "out");
// Step 3: return the output tensor.
return out;
}
// In order to also check the gradients for your op, add calls like ggml_set_param(ctx, a)
// immediately after you create the tensors.
// This is optional and only makes sense if a backwards pass has actually been implemented for the new op.
};
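// A minimal sketch (hypothetical, not registered in section 3) of how the example above could
// opt into gradient checks: mark the input tensors as parameters with ggml_set_param right
// after creating them, as described in the comment above. This only makes sense once a
// backwards pass exists for the op.
struct test_example_grad : public test_example {
    using test_example::test_example;

    ggml_tensor * build_graph(ggml_context * ctx) override {
        ggml_tensor * a = ggml_new_tensor(ctx, type, 4, ne.data());
        ggml_set_param(ctx, a); // request gradients for "a"
        ggml_set_name(a, "a");

        ggml_tensor * b = ggml_new_tensor(ctx, type, 4, ne.data());
        ggml_set_param(ctx, b); // request gradients for "b"
        ggml_set_name(b, "b");

        ggml_tensor * out = ggml_add(ctx, a, b);
        ggml_set_name(out, "out");
        return out;
    }
};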