/**
* Decode a JPEG image into an HTML Canvas
*
* Rough algorithm from the JPEG spec:
*
* while (no_eoi_marker) {
* while (no_sos_marker) {
* interpret_markers()
* }
* decode_scan() {
* interpret_header()
* while (more_intervals) {
* decode_restart_interval() {
* reset_decoder()
* while (more_mcus) {
* decode_mcu() {
* foreach(dataunit) {
* decode_data_unit() {
* // A DU is an 8x8 block of samples
* decode_dc_coeff()
* decode_ac_coeff()
* dequantize()
* idct()
* }
* }
* }
* }
* find_next_marker()
* }
* }
* }
* }
*/
const ZIGZAG = [
0, 1, 5, 6, 14, 15, 27, 28,
2, 4, 7, 13, 16, 26, 29, 42,
3, 8, 12, 17, 25, 30, 41, 43,
9, 11, 18, 24, 31, 40, 44, 53,
10, 19, 23, 32, 39, 45, 52, 54,
20, 22, 33, 38, 46, 51, 55, 60,
21, 34, 37, 47, 50, 56, 59, 61,
35, 36, 48, 49, 57, 58, 62, 63
];
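// ZIGZAG[i] is the zig-zag (transmission-order) index of the coefficient that belongs
// at natural (row-major) position i; reorder() below uses it to de-zig-zag a block.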
const QUANT_TABLE_SIZE = 64;
const NUM_HUFFMAN_LENGTHS = 16;
const DATA_UNIT_SIZE = 8;
let img = {};
let idctFn; // IDCT implementation chosen in decodeScan(); used by decodeDataUnit()
/**
* Returns the high 4 bits of the byte
*
* @param {*} byte
*/
function getHighNibble(byte) {
return byte >> 4;
}
/**
* Returns the low 4 bits of the byte
*
* @param {*} byte
*/
function getLowNibble(byte) {
return byte & 0x0F;
}
/**
* Generate lookup table for marker codes (Table B.1)
*/
function createMarkerCodeTable() {
// Code assignments
let codes = new Map([
// SOF markers, non-differential, Huffman-coding
[0xC0, "SOF0"], // Baseline DCT
[0xC1, "SOF1"], // Extended sequential DCT
[0xC2, "SOF2"], // Progressive DCT
[0xC3, "SOF3"], // Lossless (sequential)
// SOF markers, differential, Huffman-coding
[0xC5, "SOF5"], // Differential sequenctial DCT
[0xC6, "SOF6"], // Differential progressive DCT
[0xC7, "SOF7"], // Differential lossless (sequential)
// SOF markers, non-differential, arithmetic coding
[0xC8, "JPG"], // (Reserved)
[0xC9, "SOF9"], // Extended sequential DCT
[0xCA, "SOF10"], // Progressive DCT
[0xCB, "SOF11"], // Lossless (sequential)
// SOF markers, differential, arithmetic coding
[0xCD, "SOF13"], // Differential sequential DCT
[0xCE, "SOF14"], // Differential progressive DCT
[0xCF, "SOF15"], // Differential lossless (sequential)
// Huffman table spec
[0xC4, "DHT"], // Define Huffman table(s)
// Arithmetic coding conditioning spec
[0xCC, "DAC"], // Define arithmetic coding conditioning(s)
// Other markers
[0xD8, "SOI*"], // Start of image
[0xD9, "EOI*"], // End of image
[0xDA, "SOS"], // Start of sequence
[0xDB, "DQT"], // Define quantization table
[0xDC, "DNL"], // Define number of lines
[0xDD, "DRI"], // Define restart interval
[0xDE, "DHP"], // Define hierarchical progression
[0xDF, "EXP"], // Expand reference component(s)
[0xFE, "COM"], // Comment
// Reserved markers
[0x01, "TEM*"], // For temp private in arithmetic coding
]);
// Restart interval termination
for (let i = 0; i < 8; i++) {
codes.set(0xD0 + i, "RST" + i); // Restart with modulo 8 count 'm'
}
// Reserved for application segmentation
for (let i = 0; i <= 0xF; i++) {
codes.set(0xE0 + i, "APP" + i.toString(16).toUpperCase());
}
// Reserved for JPEG extensions
for (let i = 0; i <= 0xD; i++) {
codes.set(0xF0 + i, "JPG" + i.toString(16).toUpperCase());
}
// Reserved
for (let i = 2; i <= 0xBF; i++) {
codes.set(0x00 + i, "RES");
}
return codes;
}
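// Markers appear in the data stream as a 0xFF byte followed by one of the code bytes
// above. Names suffixed with '*' follow Table B.1's convention for standalone markers
// that carry no length/parameter segment.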
/**
* Entry point. Reads the specified file and passes it to the parser.
*
* @param {File} fileSpec
*/
function readFile(fileSpec) {
let reader = new FileReader();
reader.onload = function (evt) {
let result = evt.target.result;
parseFile(result);
};
reader.onerror = function (evt) {
alert("error: " + evt);
}
reader.readAsArrayBuffer(fileSpec);
}
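// A minimal way to wire readFile() to the page (illustrative sketch; assumes an
// <input type="file"> element with id "fileInput", which is not defined in this file):
//
//   document.getElementById("fileInput")
//       .addEventListener("change", (e) => readFile(e.target.files[0]));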
/**
* Parse the JPEG data stream: scan for markers, dispatch each segment to its parser,
* and decode the frame and scan data.
*
* @param {ArrayBuffer} data
*/
function parseFile(data) {
let codes = createMarkerCodeTable();
let reader = new DataViewReader(new DataView(data));
let seenSOI = false;
let seenSOF = false;
let seeingJunk = false;
while (reader.hasMoreBytes()) {
// Look for markers / parameters
let byte = reader.nextByte();
if (byte != 0xFF) {
if (!seeingJunk) {
console.log(reader.currentIndex() + " Found junk byte(s) starting at: " + reader.currentIndex());
seeingJunk = true;
}
continue; // Junk
} else if (seeingJunk) {
console.log(reader.currentIndex() + " Last junk byte was at " + (reader.currentIndex() - 1));
seeingJunk = false;
}
if (!reader.hasMoreBytes()) {
console.log("Unexpected EOF");
break;
}
byte = reader.nextByte();
let marker = codes.get(byte);
if (marker === undefined) {
console.log(reader.currentIndex() + ": Couldn't find marker for " + byte);
continue;
}
console.log(reader.currentIndex() + ": Found " + marker + " for " + byte);
if (marker === "SOI*") {
// We should ignore everything before we see an SOI
seenSOI = true;
img.restartInterval = 0;
continue;
} else if (seenSOI === false) {
console.log("Error: Missing SOI");
break;
}
// Tables / Misc
if (marker === "DHT") {
parseHuffmanTable(marker, reader, img);
} else if (marker === "DAC") {
// Arithmetic coding not supported
parseUnsupportedSegment(marker, reader);
} else if (marker === "DQT") {
parseQuantizationTable(marker, reader, img);
} else if (marker === "DRI") {
parseRestartInterval(marker, reader, img);
} else if (marker === "COM") {
parseUnsupportedSegment(marker, reader);
} else if (marker.startsWith("APP")) {
// Look for JFIF
parseAppSegment(marker, reader);
}
// Interpret frame data
else if (marker === "SOF0") {
seenSOF = true;
decodeFrame(marker, reader, img);
} else if (marker.startsWith("RST")) {
// TODO: Need to parse this! (We shouldn't come across an RST in this code
// path; it should be found + consumed during processing of a scan)
parseUnsupportedSegment(marker, reader);
} else if (marker === "EOI*") {
console.log("Found EOI marker. Bye!");
break;
} else if (marker === "SOS") {
img.scan = parseStartOfSequence(marker, reader);
decodeScan(marker, reader, img, img.scan);
} else if (marker === "DNL") {
// TODO: We shouldn't come across this in this part of the parser,
// and even if we didn't we wouldn't know what to do with it.
// Best thing to do is to preprocess the file to extract the # of lines
// from the DNL header, so we don't have to check for DNL while decoding a scan
// (Also, DNL is rarely used in practice, many decoders don't support it...)
parseDNL(reader, img);
} else if (marker === "DHP") {
// DHP only used in hierarchical images
parseUnsupportedSegment(marker, reader);
} else if (marker === "EXP") {
// EXP not supported; assume all components are fullsize
parseUnsupportedSegment(marker, reader);
}
// Misc stuff we ignore
else if (marker.startsWith("RES")) {
parseUnsupportedSegment(marker, reader);
} else if (marker.startsWith("SOC")) {
// Only baseline profile is supported
parseUnsupportedSegment(marker, reader);
} else if (marker.startsWith("JPG")) {
parseUnsupportedSegment(marker, reader);
} else if (marker === "TEM*") {
parseUnsupportedSegment(marker, reader);
} else {
console.log(marker + " segment not handled yet");
}
}
}
/**
* Process a segment we don't support. Effectively a NOOP for now.
*
* @param {String} marker Name of the segment
* @param {DataViewReader} reader
*/
function parseUnsupportedSegment(marker, reader) {
let length = reader.nextWord();
console.log(" Length of " + marker + " segment: " + length);
// Nothing to do, skip the segment
reader.skip(length - 2); // Length includes the 2 bytes that describe length
}
/**
* Decode a frame of the image
*
* @param {String} marker Name of the segment
* @param {DataViewReader} reader
*/
function decodeFrame(marker, reader, img) {
let length = reader.nextWord();
console.log(" Length of " + marker + " segment: " + length);
let precision = reader.nextByte();
let frameY = reader.nextWord();
let frameX = reader.nextWord();
let numComponents = reader.nextByte();
let components = [];
for (let i = 0; i < numComponents; i++) {
let component = {};
component.componentID = reader.nextByte(); // Component ID (0-255)
let tmp = reader.nextByte();
component.hSampleFactor = getHighNibble(tmp); // H sampling factor (1-4)
component.vSampleFactor = getLowNibble(tmp); // V sampling factor (1-4)
component.quantTableID = reader.nextByte(); // quant table to use (0-3)
components[i] = component;
}
let frame = {};
frame.precision = precision;
frame.frameY = frameY;
frame.frameX = frameX;
frame.numComponents = numComponents;
frame.components = components;
img.frame = frame;
console.log("precision: " + precision + ", frameY: " + frameY + ", frameX: " + frameX
+ ", #components: " + numComponents);
for (let i = 0; i < numComponents; i++) {
console.log(" Component[" + i + "]: (id, H, V, Tq) " + components[i])
}
// Figure out component dimensions (A.1.1)
let hmax = 0; // Max H sampling factor across all components
let vmax = 0; // Max V sampling factor across all components
for (const component of components) {
hmax = Math.max(hmax, component.hSampleFactor);
vmax = Math.max(vmax, component.vSampleFactor);
}
// Calculate # of horizontal + vertical MCUs in the image
frame.hMCUs = Math.ceil(frame.frameX / DATA_UNIT_SIZE / hmax);
frame.vMCUS = Math.ceil(frame.frameY / DATA_UNIT_SIZE / vmax);
// Calculate size in pixels of output buffer (which may be bigger than the image)
frame.outputX = frame.hMCUs * hmax * DATA_UNIT_SIZE;
frame.outputY = frame.vMCUS * vmax * DATA_UNIT_SIZE;
// Calculate component dimensions (x,y)
for (const component of components) {
component.hSize = Math.ceil(frame.outputX * (component.hSampleFactor / hmax)); // size in X
component.vSize = Math.ceil(frame.outputY * (component.vSampleFactor / vmax)); // size in Y
console.log(" Component[" + component.componentID + "] size: "
+ component.hSize + "x" + component.vSize);
}
// Set up output buffers for each component
for (const component of components) {
component.imgBuff = new Uint8Array(component.hSize * component.vSize); // component x * y
component.outputBuff = new Uint8Array(frame.outputX * frame.outputY); // Image size
}
}
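// Worked example of the geometry above: a 640x480 4:2:0 image has Y with H=V=2 and
// Cb/Cr with H=V=1, so hmax = vmax = 2, hMCUs = ceil(640/8/2) = 40, vMCUS = 30, the
// output buffer is 640x480, Y is stored at 640x480 and Cb/Cr at 320x240.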
/**
* Parse the Start of Scan (SOS) header.
*
* @param {String} marker Name of the segment
* @param {DataViewReader} reader
*/
function parseStartOfSequence(marker, reader, img) {
let length = reader.nextWord();
console.log(" Length of " + marker + " segment: " + length);
let numComponents = reader.nextByte();
let components = [];
for (let i = 0; i < numComponents; i++) {
let component = [];
component[0] = reader.nextByte(); // Scan component selector
let tmp = reader.nextByte();
component[1] = getHighNibble(tmp); // DC entropy coding selector
component[2] = getLowNibble(tmp); // AC entropy coding selector
components[i] = component;
}
let ss = reader.nextByte(); // Start of predictor selection
if (ss != 0) {
console.error("Expected SS=0, got " + ss + ". Not a sequential DCT scan?");
}
let se = reader.nextByte(); // End of predictor selection
if (se != 63) {
console.error("Expected SE=63, got " + se + ". Not a sequential DCT scan?");
}
let tmp = reader.nextByte();
let ah = getHighNibble(tmp); // Successive approx. bit position high
let al = getLowNibble(tmp); // Successive approx. bit position low
console.log("numComponents: " + numComponents + ", ss: " + ss + ", se: "
+ se + ", ah: " + ah + ", al: " + al);
for (let i = 0; i < numComponents; i++) {
console.log(" Component[" + i + "]: (Cs, Td, Ta) " + components[i]);
}
let scan = {};
scan.numComponents = numComponents;
scan.components = components;
scan.ss = ss;
scan.se = se;
scan.ah = ah;
scan.al = al;
// Workspace to store DC + AC coefficients when decoding a block
scan.block = new Array(DATA_UNIT_SIZE * DATA_UNIT_SIZE);
// Scratch space used when reordering coefficients from zig-zag order
scan.scratch = new Array(DATA_UNIT_SIZE * DATA_UNIT_SIZE);
return scan;
}
/**
* Sets the value of a pixel at the given coordinates
*
* @param {*} data Data buffer of image data
* @param {*} hSize Width of the image in pixels
* @param {*} x
* @param {*} y
* @param {*} pixel
*/
function setPixel(data, hSize, x, y, pixel) {
let lineStride = hSize * 4; // Pixel stride is 4
let xy = (y * lineStride) + (x * 4);
data[xy ] = pixel[0];
data[xy + 1] = pixel[1];
data[xy + 2] = pixel[2];
data[xy + 3] = pixel[3];
}
/**
* Decode the scan: the entropy-coded data that follows the SOS header.
*
* @param {String} marker Name of the segment
* @param {DataViewReader} reader
*/
function decodeScan(marker, reader, img, scan) {
// Scratch space for DC predictor. Indexed by component ID (therefore usually 1-indexed)
scan.dcpred = [];
// Choose IDCT function
let idctType = document.querySelector('input[name="idctType"]:checked').value;
switch(idctType) {
case "calculated": idctFn = idct; break;
case "cached": idctFn = idctCached; break;
case "chenwang": idctFn = idctChenWang; break;
default:
console.log(`Warning: unknown IDCT type ${idctType}`);
idctFn = idct;
}
// Choose colorspace conversion function
let colorConvFn;
let colorConvType = document.querySelector('input[name="colorConvType"]:checked').value;
switch(colorConvType) {
case "float": colorConvFn = YCbCrToRGB; break;
case "integer": colorConvFn = YCbCrToRGBInt; break;
default:
console.log(`Warning: unknown colorspace conversion type ${colorConvType}`);
colorConvFn = YCbCrToRGB;
}
// Order components in order specified by scan selector (B.2.3)
let components = [];
for (const selector of scan.components) {
for (const imgComponent of img.frame.components) {
let id = imgComponent.componentID;
if (id === selector[0]) {
components.push(imgComponent);
scan.dcpred[id] = 0; // Reset DC pred for each component (E.2.4)
break;
}
}
}
// Image components in the order specified by the scan selector
scan.orderedComponents = components;
// Note: data is always interleaved if there is more than 1 component (A.2)
// Read compressed data; decode it....
let mcuIndex = 0;
for (let v = 0; v < img.frame.vMCUS; v++) {
for (let h = 0; h < img.frame.hMCUs; h++) {
// console.log("Decoding MCU " + mcuIndex);
if (img.restartInterval > 0 && mcuIndex > 0 && (mcuIndex % img.restartInterval) === 0) {
// Look for a restart marker; file is likely corrupt if we don't find one
let byte = reader.nextByte();
if (byte != 0xFF) {
console.error("Expected 0xFF when looking for restart marker, found "
+ byte.toString(16));
}
byte = reader.nextByte();
if (byte >= 0xD0 && byte <= 0xD7) {
// console.log("Restart marker " + byte.toString(16) + ", resetting decoder");
} else {
console.error("Found unexpected marker when looking for reset: " + byte.toString(16));
}
// Found restart marker, so reset decoder (F.2.1.3.1)
scan.dcpred.fill(0);
reader.align(); // Align to the next byte
}
decodeMCU(reader, img, scan, v, h);
mcuIndex++;
}
}
// Draw the components at their native size, then scale up to the image's size,
// then draw the components at the image's size. Finally, convert the components
// to RGB and draw the combined image to the page.
for (const component of components) {
drawComponent(component);
scaleComponent(component);
drawComponentFullSize(component);
}
combineComponents(components, colorConvFn);
document.getElementById("outputhsize").textContent = img.frame.outputX;
document.getElementById("outputvsize").textContent = img.frame.outputY;
}
/**
* Convert the 3 YCbCr components to RGB and combine them into a single output image,
* then draw the image to the output canvas.
*
* @param {*} components List of components
* @param {*} colorConvFn Function to use for colorspace conversion
*/
function combineComponents(components, colorConvFn) {
// Combine components via YCbCr --> RGB conversion
let canvas = document.getElementById("outputCanvas");
canvas.setAttribute("width", img.frame.outputX);
canvas.setAttribute("height", img.frame.outputY);
let ctx = canvas.getContext("2d");
let imgData = ctx.createImageData(canvas.width, canvas.height);
let data = imgData.data;
let Y, Cb, Cr;
let pixel = [0, 0, 0, 0];
let width = img.frame.outputX;
let height = img.frame.outputY;
let index = 0;
if (img.frame.numComponents === 1) {
// JFIF grayscale
for (let y = 0; y < height; y++) {
for (let x = 0; x < width; x++, index++) {
Y = components[0].outputBuff[index];
pixel = [Y, Y, Y, 255];
setPixel(data, width, x, y, pixel);
}
}
} else if (img.frame.numComponents === 3) {
// JFIF YCbCr
let buff0 = components[0].outputBuff;
let buff1 = components[1].outputBuff;
let buff2 = components[2].outputBuff;
for (let y = 0; y < height; y++) {
for (let x = 0; x < width; x++, index++) {
Y = buff0[index];
Cb = buff1[index];
Cr = buff2[index];
colorConvFn(Y, Cb, Cr, pixel);
setPixel(data, width, x, y, pixel);
}
}
} else {
console.error("Error: Image has " + frame.numComponents + " components; we only support 1 or 3 for JFIF");
}
ctx.putImageData(imgData, 0, 0);
}
/**
* Scale the component from its native size to the image's size, placing the output
* into the component's outputBuff
*
* @param {*} component
*/
function scaleComponent(component) {
let hScale = img.frame.outputX / component.hSize; // Factor to scale component up to output size
let vScale = img.frame.outputY / component.vSize; // Factor to scale component up to output size
if (hScale === 1 && vScale === 1) { // Optimization
component.outputBuff.set(component.imgBuff, 0);
return;
}
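// Upsample by pixel replication (nearest neighbour): each source sample is copied into
// an hScale x vScale block of the output buffer.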
for (let sy = 0, dy = 0; sy < component.vSize; sy++, dy+=vScale) {
let srcLineStart = sy * component.hSize;
for (let sx = 0, dx = 0; sx < component.hSize; sx++, dx+=hScale) {
let val = component.imgBuff[srcLineStart + sx];
for (let y = 0; y < vScale; y++) {
for (let x = 0; x < hScale; x++) {
let destY = dy + y;
let destX = dx + x;
component.outputBuff[(destY * img.frame.outputX) + destX] = val;
}
}
}
}
}
/**
* Draw the component to the page, scaled up to the size of the image
*
* @param {*} component
*/
function drawComponentFullSize(component) {
let id = component.componentID;
let canvas = document.getElementById("component" + id + "ScaledCanvas");
canvas.setAttribute("width", img.frame.outputX);
canvas.setAttribute("height", img.frame.outputY);
let ctx = canvas.getContext("2d");
let imgData = ctx.createImageData(canvas.width, canvas.height);
let data = imgData.data;
let pixel = [0, 0, 0, 255]; // 255 for alpha channel
for (let y = 0; y < img.frame.outputY; y++) {
let srcLineStart = y * img.frame.outputX;
for (let x = 0; x < img.frame.outputX; x++) {
pixel[0] = pixel[1] = pixel[2] = component.outputBuff[srcLineStart + x];
setPixel(data, img.frame.outputX, x, y, pixel);
}
}
ctx.putImageData(imgData, 0, 0);
}
/**
* Draw the component to the page at its native size
*
* @param {*} component
*/
function drawComponent(component) {
let id = component.componentID;
let compHSizeStr = document.getElementById("component" + id + "hsize");
compHSizeStr.textContent = component.hSize;
let compVSizeStr = document.getElementById("component" + id + "vsize");
compVSizeStr.textContent = component.vSize;
let canvas = document.getElementById("component" + id + "Canvas");
canvas.setAttribute("width", component.hSize);
canvas.setAttribute("height", component.vSize);
let ctx = canvas.getContext("2d");
let imgData = ctx.createImageData(canvas.width, canvas.height);
let data = imgData.data;
let pixel = [0, 0, 0, 255]; // 255 for alpha channel
for (let y = 0; y < component.vSize; y++) {
let srcLineStart = y * component.hSize;
for (let x = 0; x < component.hSize; x++) {
pixel[0] = pixel[1] = pixel[2] = component.imgBuff[srcLineStart + x];
setPixel(data, component.hSize, x, y, pixel);
}
}
ctx.putImageData(imgData, 0, 0);
}
/**
* Perform YCbCr to RGB colorspace conversion using the algorithm in the JFIF spec.
*
* Writes the RGB values (and sets alpha = 255) into pixel
*
* @param {*} Y
* @param {*} Cb
* @param {*} Cr
* @param {*} pixel 4-element array of RGBA
*/
function YCbCrToRGB(Y, Cb, Cr, pixel) {
pixel[0] = clamp(Math.round(Y + (1.402 * (Cr - 128))));
pixel[1] = clamp(Math.round(Y - (0.34414 * (Cb - 128)) - (0.71414 * (Cr - 128))));
pixel[2] = clamp(Math.round(Y + 1.772 * (Cb - 128)));
pixel[3] = 255;
}
/**
* Integer implementation of YCbCr to RGB colorspace conversion.
*
* Writes the RGB values (and sets alpha = 255) into pixel
*
* @param {*} Y
* @param {*} Cb
* @param {*} Cr
* @param {*} pixel 4-element array of RGBA
*/
function YCbCrToRGBInt(Y, Cb, Cr, pixel) {
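// Fixed-point version of the JFIF formulas above, with coefficients scaled by 256:
// 359 ~ 1.402*256, 88 ~ 0.34414*256, 183 ~ 0.71414*256, 454 ~ 1.772*256.
// The +128 rounds before the final >> 8.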
Y = Y << 8;
Cb -= 128;
Cr -= 128;
pixel[0] = clamp((Y + (359 * Cr) + 128) >> 8);
pixel[1] = clamp((Y - (88 * Cb) - (183 * Cr) + 128) >> 8);
pixel[2] = clamp((Y + (454 * Cb) + 128) >> 8);
pixel[3] = 255;
}
/**
* Decode all of the data blocks in the MCU. The MCU contains 1 or more data
* blocks for each component. The number of data blocks per component is
* determined by the horizontal + vertical sampling factors of each component.
*
* See A.2.3 for information about decoding interleaved data units.
*
* @param {DataViewReader} reader Data source
* @param {*} img Struct with information about the image, including frame + components
* @param {*} scan
* @param {*} vMCU Vertical index of this MCU
* @param {*} hMCU Horizontal index of this MCU
*/
function decodeMCU(reader, img, scan, vMCU, hMCU) {
// TODO: rename scan.components vs scan.orderedComponents, it's confusing. Can we
// just merge into a big 'component' data structure that has both the component
// info from the frame and the component info from the scan header? Probably
// breaks for hierarchical images, but so what?
//
// Also want to bolt on the ZZ tables to Component....time to make a component class?
for (let i = 0; i < scan.orderedComponents.length; i++) {
let component = scan.orderedComponents[i];
let id = component.componentID;
// Find the component metadata in the scan so we can look up huff tables. Ugh so messy
let dcTableID = 0;
let acTableID = 0;
// for (sc of scan.components) {
for (let j = 0; j < scan.components.length; j++) {
let sc = scan.components[j];
if (sc[0] === id) {
dcTableID = sc[1];
acTableID = sc[2];
break;
}
}
let dcTable = img.huffmanTables[0][dcTableID]; // 0 is DC table, 1 is AC table: B.2.4.2
let acTable = img.huffmanTables[1][acTableID];
let quantTable = img.quantTables[component.quantTableID];
// # of H,V blocks (data units) per MCU in the component
let h = component.hSampleFactor;
let v = component.vSampleFactor;
// Top-left pixel coordinates of the MCU within the component
// mcuY: # vert MCUs * v blocks/MCU * block size * line stride of component
let mcuY = vMCU * v * DATA_UNIT_SIZE * component.hSize;
// mcuX: mcuY + (# horiz MCUs * h blocks/MCU * block size)
let mcuX = mcuY + (hMCU * h * DATA_UNIT_SIZE);
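// For example, in the Y component of a 4:2:0 image (h = v = 2, hSize = 640), the MCU at
// (vMCU=1, hMCU=3) starts at offset 1*2*8*640 + 3*2*8 = 10288, i.e. pixel (48, 16).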
let block;
for (let y = 0; y < v; y++) { // Iterate over blocks in the MCU
for (let x = 0; x < h; x++) { // Iterate over blocks in the MCU
// Block is the decoded image data. Store it in a component
block = decodeDataUnit(reader, img, scan, id, dcTable, acTable, quantTable);
// Top-left coordinates of the block within the MCU
let topX = x * DATA_UNIT_SIZE;
let topY = y * DATA_UNIT_SIZE * component.hSize;
// Copy block contents to component
for (let yy = 0; yy < DATA_UNIT_SIZE; yy++) { // yy is local y coordinate in block
let blockLineStart = mcuX + topY + topX + (yy * component.hSize);
for (let xx = 0; xx < DATA_UNIT_SIZE; xx++) { // xx is local x coordinate in block
let idx = blockLineStart + xx;
if (component.imgBuff[idx] != 0) {
console.warn("Warning: overwriting imgBuf data: idx=" + idx
+ ", x=" + x + ", y=" + y + ", h=" + h + ", v=" + v + ", hMCU="
+ hMCU + ", vMCU=" + vMCU + ", topX=" + topX + ", topY="
+ topY + ", yy=" + yy + ", xx=" + xx + ", blockLineStart=" + blockLineStart);
}
component.imgBuff[idx] = block[yy * DATA_UNIT_SIZE + xx];
}
}
}
}
}
}
/**
* Read and decode the DC coefficient from the bit stream. The caller is responsible
* for updating the DC predictor (scan.dcpred) with the returned value.
*
* Returns the DC coefficient
*
* F.2.2.1
*
* @param {DataViewReader} reader Data source
* @param {*} img Struct containing information about the image
* @param {*} scan Struct containing information about the scan (specifically the table of DC predictors)
* @param {*} id ID of the current component. Used to look up the current DC predictor
* @param {*} dcTable Huffman table implementation for DC coefficients
*/
function decodeDCCoeff(reader, img, scan, id, dcTable) {
let t = dcTable.decodeHuffman(reader, img);
let diff = receive(reader, img, t);
diff = extend(diff, t);
let dcpred = scan.dcpred[id];
// console.log("DC Coeff: val: " + t + ", diff: " + diff + ", pred: " + dcpred);
return dcpred + diff;
}
/**
* Read and decode the AC coefficients from the bit stream and load them into
* the array of zig-zag'ed coefficients
*
* F.2.2.2
*
* @param {DataViewReader} reader Data source
* @param {*} img Struct containing information about the image
* @param {Array} zigzagCoeff 64-element array where we store the AC coefficients
* @param {*} acTable Huffman table implementation for AC coefficients
*/
function decodeACCoeffs(reader, img, zigzagCoeff, acTable) {
for (let k = 1; k < 64; k++) {
const rs = acTable.decodeHuffman(reader, img);
// F.1.2.2
const ssss = getLowNibble(rs); // Size (bit count) of the amplitude of the next non-zero coeff in ZZ
const rrrr = getHighNibble(rs); // Run length of zero coeffs in ZZ before next non-zero
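// e.g. rs = 0x53 means: skip 5 zero coefficients, then the next non-zero coefficient's
// amplitude occupies 3 bits (read with RECEIVE and sign-extended with EXTEND below).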
if (ssss === 0) {
if (rrrr === 0xF) {
// console.log("ZRL");
k += 15;
continue;
} else if (rrrr === 0) {
// console.log("EOB");
break;
} else {
// Other values undefined for Baseline
console.error("Error: unexpected RRRR (expected 0 or 0x0f, got " + rrrr + ")");
}
} else {
k += rrrr;
// Implements the 'DECODE_ZZ' function from Figure F.14
// Decode amplitude + sign
const amp = receive(reader, img, ssss);
const sign = extend(amp, ssss);
// console.log("AC[" + k + "]: (" + amp + ", " + sign + ")");
zigzagCoeff[k] = sign;
}
}
}
/**
* Read and decode a single 8x8 data unit. In order, we:
* - Decode the DC coefficient
* - Decode the 63 AC coefficients
* - Dequantize the 64 coefficients
* - Convert coefficients from zig-zag order to sequential order
* - Perform Inverse DCT (IDCT)
* - Recenter the coefficients
*
* F.2.1.2
*
* @param {DataViewReader} reader Data source
* @param {*} img Struct with information about the image, including frame + components
* @param {*} scan Struct with information about this scan
* @param {*} id ID of the current component. Used to look up the current DC predictor
* @param {*} dcTable Huffman table to use for decoding DC coefficients
* @param {*} acTable Huffman table to use for decoding AC coefficients
* @param {*} quantTable Quantization table for this block
*/
function decodeDataUnit(reader, img, scan, id, dcTable, acTable, quantTable) {
// 8x8 table of DC/AC coeffs
scan.block.fill(0);
// Decode DC Coeff for 8x8 block using DC table dest in scan header (F.2.2.1)
scan.block[0] = decodeDCCoeff(reader, img, scan, id, dcTable);
scan.dcpred[id] = scan.block[0];
// Decode AC coeffs for 8x8 block using AC table dest in scan header (F.2.2.2)
decodeACCoeffs(reader, img, scan.block, acTable);
// Dequantize using table dest in frame header (F.2.1.4)
// Multiply each coefficient by the corresponding
// value in the quant table (which is in ZZ order too)
dequantize(scan.block, quantTable);
// Reorder coefficients (de-zig-zag)
reorder(scan.block, scan.scratch);
// Calculate the inverse DCT (IDCT) on dequantized values (F.2.1.5)
let block = idctFn(scan.block);
// Level-shift (F.2.1.5)
levelShift(block);
return block;
}
/**
* Perform level-shift of coefficients in the block. Operates in-place.
*
* F.2.1.5
*
* @param {*} block
*/
function levelShift(block) {
for (let i = 0; i < block.length; i++) {
block[i] += 128;
}
}
/**
* Reorder the coefficients into their natural order (de-zig-zag)
*
* Not clearly specified, but clearly necessary.
*
* Operates in-place on coeff.
*
* @param {*} coeff Coefficients in zig-zag order
* @param {*} scratch Scratch space to use when reordering
*/
function reorder(coeff, scratch) {
for (let i = 0; i < coeff.length; i++) {
scratch[i] = coeff[ZIGZAG[i]];
}
for (let i = 0; i < coeff.length; i++) {
coeff[i] = scratch[i];
}
}
/**
* Dequantize the components in the table using the values from the
* given quantization table. Operates in-place.
*
* F.2.1.4
*
* @param {*} zigzagCoeff
* @param {*} quantTable
*/
function dequantize(zigzagCoeff, quantTable) {
for (let i = 0; i < zigzagCoeff.length; i++) {
zigzagCoeff[i] = zigzagCoeff[i] * quantTable[i];
}
}
/**
* Clamp an integer value to the range [0-255]
*
* @param {*} val
*/
function clamp(val) {
return (val > 255 ? 255 : (val < 0 ? 0 : val));
}
/**
* Implements the EXTEND function to scale the value of a coefficient
*
* F.2.2.1
* Figure F.12
*
* @param {*} v
* @param {*} t
*/
function extend(v, t) {
if (t === 0) {
return 0;
}
let vt = 1 << (t - 1); // Same as 2 ** (t - 1), which is what the JPEG spec uses
if (v < vt) {
vt = (-1 << t) + 1;
v += vt;
}
return v;
}
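// Example: with t = 3, received values 0..3 extend to -7..-4 while 4..7 stay as 4..7,
// covering exactly the amplitudes representable in 3 bits.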
/**
* Implements the RECEIVE function to read a sequence of bits
*
* F.2.2.4
*
* @param {DataViewReader} reader Data source
* @param {*} img Struct with information about the image, including frame + components
* @param {*} ssss Number of bits to read
*/
function receive(reader, img, ssss) {
let v = 0;
for (let i = 0; i < ssss; i++) {
v = (v << 1) + reader.nextBit(img);
}
return v;
}
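// Example: receive() of the bit sequence 1,0,1 (ssss = 3) assembles the bits MSB-first
// into the value 5.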
/**
* Parse App segment
*
* @param {String} marker Name of the segment
* @param {DataViewReader} reader
*/
function parseAppSegment(marker, reader) {
let length = reader.nextWord();
console.log(" Length of " + marker + " segment: " + length);
let tmp = reader.currentIndex();
if (marker === "APP0") {
if (length >= 16) {
let str = "";
for (let i = 0; i < 5; i++) {
str += String.fromCharCode(reader.nextByte());
}
if (str === "JFIF\0") {
let major = reader.nextByte();
let minor = reader.nextByte();
console.log("JFIF version: " + major + "." + minor);
let units = reader.nextByte();
let xdens = reader.nextWord()
let ydens = reader.nextWord();
let xthumb = reader.nextByte();
let ythumb = reader.nextByte();
let thumbsize = xthumb * ythumb * 3; // 3 bytes: RGB
//console.log("thumbnail size: " + xthumb + "x" + ythumb + ", " + thumbsize + " bytes");
reader.skip(thumbsize);
}
}
} else {
let content = "";
for (let i = 0; i < (length - 2); i++) {
content += String.fromCharCode(reader.nextByte());
}
console.log(marker + " content: " + content);
}
// TODO: figure out where to set the reader
return tmp + length - 2; // Length includes the 2 bytes that describe length
}
/**
* Parse quantization table
*
* @param {String} marker Name of the segment
* @param {DataViewReader} reader
*/
function parseQuantizationTable(marker, reader, img) {
let length = reader.nextWord()
//console.log(" Length of " + marker + " segment: " + length);
if (img.quantTables === undefined) {
img.quantTables = [ [], [], [], [] ];
}