@article{olsen_deepweeds_2019,
title = {{DeepWeeds}: {A} {Multiclass} {Weed} {Species} {Image} {Dataset} for {Deep} {Learning}},
volume = {9},
copyright = {2019 The Author(s)},
issn = {2045-2322},
shorttitle = {Ol19},
url = {https://www.nature.com/articles/s41598-018-38343-3},
doi = {10.1038/s41598-018-38343-3},
abstract = {Robotic weed control has seen increased research of late with its potential for boosting productivity in agriculture. Majority of works focus on developing robotics for croplands, ignoring the weed management problems facing rangeland stock farmers. Perhaps the greatest obstacle to widespread uptake of robotic weed control is the robust classification of weed species in their natural environment. The unparalleled successes of deep learning make it an ideal candidate for recognising various weed species in the complex rangeland environment. This work contributes the first large, public, multiclass image dataset of weed species from the Australian rangelands; allowing for the development of robust classification methods to make robotic weed control viable. The DeepWeeds dataset consists of 17,509 labelled images of eight nationally significant weed species native to eight locations across northern Australia. This paper presents a baseline for classification performance on the dataset using the benchmark deep learning models, Inception-v3 and ResNet-50. These models achieved an average classification accuracy of 95.1\% and 95.7\%, respectively. We also demonstrate real time performance of the ResNet-50 architecture, with an average inference time of 53.4 ms per image. These strong results bode well for future field implementation of robotic weed control methods in the Australian rangelands.},
language = {en},
number = {1},
urldate = {2024-10-07},
journal = {Scientific Reports},
author = {Olsen, Alex and Konovalov, Dmitry A. and Philippa, Bronson and Ridd, Peter and Wood, Jake C. and Johns, Jamie and Banks, Wesley and Girgenti, Benjamin and Kenny, Owen and Whinney, James and Calvert, Brendan and Azghadi, Mostafa Rahimi and White, Ronald D.},
month = feb,
year = {2019},
note = {Publisher: Nature Publishing Group},
keywords = {Classification and taxonomy, Electrical and electronic engineering, Environmental impact, Image processing, Machine learning},
pages = {2058},
}
@article{ubaid_precision_2024,
title = {Precision {Agriculture}: {Computer} {Vision}-{Enabled} {Sugarcane} {Plant} {Counting} in the {Tillering} {Phase}},
volume = {10},
copyright = {http://creativecommons.org/licenses/by/3.0/},
issn = {2313-433X},
shorttitle = {{UJ24}},
url = {https://www.mdpi.com/2313-433X/10/5/102},
doi = {10.3390/jimaging10050102},
abstract = {The world’s most significant yield by production quantity is sugarcane. It is the primary source for sugar, ethanol, chipboards, paper, barrages, and confectionery. Many people are affiliated with sugarcane production and their products around the globe. The sugarcane industries make an agreement with farmers before the tillering phase of plants. Industries are keen on knowing the sugarcane field’s pre-harvest estimation for planning their production and purchases. The proposed research contribution is twofold: by publishing our newly developed dataset, we also present a methodology to estimate the number of sugarcane plants in the tillering phase. The dataset has been obtained from sugarcane fields in the fall season. In this work, a modified architecture of Faster R-CNN with feature extraction using VGG-16 with Inception-v3 modules and sigmoid threshold function has been proposed for the detection and classification of sugarcane plants. Significantly promising results with 82.10\% accuracy have been obtained with the proposed architecture, showing the viability of the developed methodology.},
language = {en},
number = {5},
urldate = {2024-10-07},
journal = {Journal of Imaging},
author = {Ubaid, Muhammad Talha and Javaid, Sameena},
month = may,
year = {2024},
note = {Number: 5; Publisher: Multidisciplinary Digital Publishing Institute},
keywords = {faster R-CNN, object detection, plant counting, sugarcane counting, sugarcane detection},
pages = {102},
}
@article{rahman_performance_2023,
title = {Performance evaluation of deep learning object detectors for weed detection for cotton},
volume = {3},
issn = {2772-3755},
shorttitle = {{RLW23}},
url = {https://www.sciencedirect.com/science/article/pii/S2772375522000910},
doi = {10.1016/j.atech.2022.100126},
abstract = {Alternative non-chemical or chemical-reduced weed control tactics are critical for future integrated weed management, especially for herbicide-resistant weeds. Through weed detection and localization, machine vision technology has the potential to enable site- and species-specific treatments targeting individual weed plants. However, due to unstructured field circumstances and the large biological variability of weeds, robust and accurate weed detection remains a challenging endeavor. Deep learning (DL) algorithms, powered by large-scale image data, promise to achieve the weed detection performance required for precision weeding. In this study, a three-class weed dataset with bounding box annotations was curated, consisting of 848 color images collected in cotton fields under variable field conditions. A set of 13 weed detection models were built using DL-based one-stage and two-stage object detectors, including YOLOv5, RetinaNet, EfficientDet, Fast RCNN and Faster RCNN, by transferring pretrained object detection models to the weed dataset. RetinaNet (R101-FPN), despite its longer inference time, achieved the highest overall detection accuracy with a mean average precision (mAP@0.5) of 79.98\%. YOLOv5n showed the potential for real-time deployment in resource-constraint devices because of the smallest number of model parameters (1.8 million) and the fastest inference (17 ms on the Google Colab) while achieving comparable detection accuracy (76.58\% mAP@0.5). Data augmentation through geometric and color transformations could improve the accuracy of the weed detection models by a maximum of 4.2\%. The software programs and the weed dataset used in this study are made publicly available (https://github.com/abdurrahman1828/DNNs-for-Weed-Detections; www.kaggle.com/yuzhenlu/cottonweeddet3).},
urldate = {2024-10-07},
journal = {Smart Agricultural Technology},
author = {Rahman, Abdur and Lu, Yuzhen and Wang, Haifeng},
month = feb,
year = {2023},
keywords = {Computer vision, Dataset, Object detectors, Precision agriculture, Weed control},
pages = {100126},
}
@article{genze_manually_2024,
title = {Manually annotated and curated {Dataset} of diverse {Weed} {Species} in {Maize} and {Sorghum} for {Computer} {Vision}},
volume = {11},
issn = {2052-4463},
shorttitle = {Ge24},
url = {https://www.ncbi.nlm.nih.gov/pmc/articles/PMC10805845/},
doi = {10.1038/s41597-024-02945-6},
abstract = {Sustainable weed management strategies are critical to feeding the world’s population while preserving ecosystems and biodiversity. Therefore, site-specific weed control strategies based on automation are needed to reduce the additional time and effort required for weeding. Machine vision-based methods appear to be a promising approach for weed detection, but require high quality data on the species in a specific agricultural area. Here we present a dataset, the Moving Fields Weed Dataset (MFWD), which captures the growth of 28 weed species commonly found in sorghum and maize fields in Germany. A total of 94,321 images were acquired in a fully automated, high-throughput phenotyping facility to track over 5,000 individual plants at high spatial and temporal resolution. A rich set of manually curated ground truth information is also provided, which can be used not only for plant species classification, object detection and instance segmentation tasks, but also for multiple object tracking.},
urldate = {2024-10-07},
journal = {Scientific Data},
author = {Genze, Nikita and Vahl, Wouter K. and Groth, Jennifer and Wirth, Maximilian and Grieb, Michael and Grimm, Dominik G.},
month = jan,
year = {2024},
pmid = {38263173},
pmcid = {PMC10805845},
pages = {109},
}
@article{weyler_phenobench_2024,
title = {{PhenoBench}: {A} {Large} {Dataset} and {Benchmarks} for {Semantic} {Image} {Interpretation} in the {Agricultural} {Domain}},
issn = {1939-3539},
shorttitle = {We24},
url = {https://ieeexplore.ieee.org/abstract/document/10572312},
doi = {10.1109/TPAMI.2024.3419548},
abstract = {The production of food, feed, fiber, and fuel is a key task of agriculture, which has to cope with many challenges in the upcoming decades, e.g., a higher demand, climate change, lack of workers, and the availability of arable land. Vision systems can support making better and more sustainable field management decisions, but also support the breeding of new crop varieties by allowing temporally dense and reproducible measurements. Recently, agricultural robotics got an increasing interest in the vision and robotics communities since it is a promising avenue for coping with the aforementioned lack of workers and enabling more sustainable production. While large datasets and benchmarks in other domains are readily available and enable significant progress, agricultural datasets and benchmarks are comparably rare. We present an annotated dataset and benchmarks for the semantic interpretation of real agricultural fields. Our dataset recorded with a UAV provides high-quality, pixel-wise annotations of crops and weeds, but also crop leaf instances at the same time. Furthermore, we provide benchmarks for various tasks on a hidden test set comprised of different fields: known fields covered by the training data and a completely unseen field. Our dataset, benchmarks, and code are available at https://www.phenobench.org.},
urldate = {2024-10-08},
journal = {IEEE Transactions on Pattern Analysis and Machine Intelligence},
author = {Weyler, Jan and Magistri, Federico and Marks, Elias and Chong, Yue Linn and Sodano, Matteo and Roggiolani, Gianmarco and Chebrolu, Nived and Stachniss, Cyrill and Behley, Jens},
year = {2024},
note = {Conference Name: IEEE Transactions on Pattern Analysis and Machine Intelligence},
keywords = {Agricultural robots, Annotations, Autonomous aerial vehicles, Benchmark testing, Climate change, Crops, Data models, Farming, Food products, Machine vision, Management, Reproducibility of results, Semantics, Sustainable development, Task analysis, Training data},
pages = {1--12},
}
@article{teimouri_weed_2018,
title = {Weed {Growth} {Stage} {Estimator} {Using} {Deep} {Convolutional} {Neural} {Networks}},
volume = {18},
copyright = {http://creativecommons.org/licenses/by/3.0/},
issn = {1424-8220},
shorttitle = {Te18},
url = {https://www.mdpi.com/1424-8220/18/5/1580},
doi = {10.3390/s18051580},
abstract = {This study outlines a new method of automatically estimating weed species and growth stages (from cotyledon until eight leaves are visible) of in situ images covering 18 weed species or families. Images of weeds growing within a variety of crops were gathered across variable environmental conditions with regards to soil types, resolution and light settings. Then, 9649 of these images were used for training the computer, which automatically divided the weeds into nine growth classes. The performance of this proposed convolutional neural network approach was evaluated on a further set of 2516 images, which also varied in term of crop, soil type, image resolution and light conditions. The overall performance of this approach achieved a maximum accuracy of 78\% for identifying Polygonum spp. and a minimum accuracy of 46\% for blackgrass. In addition, it achieved an average 70\% accuracy rate in estimating the number of leaves and 96\% accuracy when accepting a deviation of two leaves. These results show that this new method of using deep convolutional neural networks has a relatively high ability to estimate early growth stages across a wide variety of weed species.},
language = {en},
number = {5},
urldate = {2024-10-08},
journal = {Sensors},
author = {Teimouri, Nima and Dyrmann, Mads and Nielsen, Per Rydahl and Mathiassen, Solvejg Kopp and Somerville, Gayle J. and Jørgensen, Rasmus Nyholm},
month = may,
year = {2018},
note = {Number: 5; Publisher: Multidisciplinary Digital Publishing Institute},
keywords = {computer vision, convolutional neural network, deep learning, growth stage, leaf counting},
pages = {1580},
}
@article{espejo-garcia_towards_2020,
title = {Towards weeds identification assistance through transfer learning},
volume = {171},
issn = {0168-1699},
shorttitle = {Es20},
url = {https://www.sciencedirect.com/science/article/pii/S0168169919319854},
doi = {10.1016/j.compag.2020.105306},
abstract = {Reducing the use of pesticides through selective spraying is an important component towards a more sustainable computer-assisted agriculture. Weed identification at early growth stage contributes to reduced herbicide rates. However, while computer vision alongside deep learning have overcome the performance of approaches that use hand-crafted features, there are still some open challenges in the development of a reliable automatic plant identification system. These type of systems have to take into account different sources of variability, such as growth stages and soil conditions, with the added constraint of the limited size of usual datasets. This study proposes a novel crop/weed identification system that relies on a combination of fine-tuning pre-trained convolutional networks (Xception, Inception-Resnet, VGNets, Mobilenet and Densenet) with the “traditional” machine learning classifiers (Support Vector Machines, XGBoost and Logistic Regression) trained with the previously deep extracted features. The aim of this approach was to avoid overfitting and to obtain a robust and consistent performance. To evaluate this approach, an open access dataset of two crop [tomato (Solanum lycopersicum L.) and cotton (Gossypium hirsutum L.)] and two weed species [black nightshade (Solanum nigrum L.) and velvetleaf (Abutilon theophrasti Medik.)] was generated. The pictures were taken by different production sites across Greece under natural variable light conditions from RGB cameras. The results revealed that a combination of fine-tuned Densenet and Support Vector Machine achieved a micro F1 score of 99.29\% with a very low performance difference between train and test sets. Other evaluated approaches also obtained repeatedly more than 95\% F1 score. Additionally, our results analysis provides some heuristics for designing transfer-learning based systems to avoid overfitting without decreasing performance.},
urldate = {2024-10-08},
journal = {Computers and Electronics in Agriculture},
author = {Espejo-Garcia, Borja and Mylonas, Nikos and Athanasakos, Loukas and Fountas, Spyros and Vasilakoglou, Ioannis},
month = apr,
year = {2020},
keywords = {Deep learning, Open data, Precision agriculture, Transfer learning, Weed identification},
pages = {105306},
}
@article{leminen_madsen_open_2020,
title = {Open {Plant} {Phenotype} {Database} of {Common} {Weeds} in {Denmark}},
volume = {12},
copyright = {http://creativecommons.org/licenses/by/3.0/},
issn = {2072-4292},
shorttitle = {Ma20},
url = {https://www.mdpi.com/2072-4292/12/8/1246},
doi = {10.3390/rs12081246},
abstract = {For decades, significant effort has been put into the development of plant detection and classification algorithms. However, it has been difficult to compare the performance of the different algorithms, due to the lack of a common testbed, such as a public available annotated reference dataset. In this paper, we present the Open Plant Phenotype Database (OPPD), a public dataset for plant detection and plant classification. The dataset contains 7590 RGB images of 47 plant species. Each species is cultivated under three different growth conditions, to provide a high degree of diversity in terms of visual appearance. The images are collected at the semifield area at Aarhus University, Research Centre Flakkebjerg, Denmark, using a customized data acquisition platform that provides well-illuminated images with a ground resolution of ∼6.6 px mm$^{-1}$. All images are annotated with plant species using the EPPO encoding system, bounding box annotations for detection and extraction of individual plants, applied growth conditions and time passed since seeding. Additionally, the individual plants have been tracked temporally and given unique IDs. The dataset is accompanied by two experiments for: (1) plant instance detection and (2) plant species classification. The experiments introduce evaluation metrics and methods for the two tasks and provide baselines for future work on the data.},
language = {en},
number = {8},
urldate = {2024-10-08},
journal = {Remote Sensing},
author = {Leminen Madsen, Simon and Mathiassen, Solvejg Kopp and Dyrmann, Mads and Laursen, Morten Stigaard and Paz, Laura-Carlota and Jørgensen, Rasmus Nyholm},
month = jan,
year = {2020},
note = {Number: 8; Publisher: Multidisciplinary Digital Publishing Institute},
keywords = {dataset, plant phenotyping, plant seedlings, weed control},
pages = {1246},
}
@article{wiesner-hanks_image_2018,
title = {Image set for deep learning: field images of maize annotated with disease symptoms},
volume = {11},
issn = {1756-0500},
shorttitle = {Wi18},
url = {https://doi.org/10.1186/s13104-018-3548-6},
doi = {10.1186/s13104-018-3548-6},
abstract = {Automated detection and quantification of plant diseases would enable more rapid gains in plant breeding and faster scouting of farmers’ fields. However, it is difficult for a simple algorithm to distinguish between the target disease and other sources of dead plant tissue in a typical field, especially given the many variations in lighting and orientation. Training a machine learning algorithm to accurately detect a given disease from images taken in the field requires a massive amount of human-generated training data.},
language = {en},
number = {1},
urldate = {2024-10-08},
journal = {BMC Research Notes},
author = {Wiesner-Hanks, Tyr and Stewart, Ethan L. and Kaczmar, Nicholas and DeChant, Chad and Wu, Harvey and Nelson, Rebecca J. and Lipson, Hod and Gore, Michael A.},
month = jul,
year = {2018},
keywords = {Artificial Intelligence, Convolutional neural network, Corn, Deep learning, Disease, Images, Machine learning, Maize, Phytopathology, Plant disease},
pages = {440},
}
@article{jiang_deepseedling_2019,
title = {{DeepSeedling}: deep convolutional network and {Kalman} filter for plant seedling detection and counting in the field},
volume = {15},
issn = {1746-4811},
shorttitle = {Ji19},
url = {https://doi.org/10.1186/s13007-019-0528-3},
doi = {10.1186/s13007-019-0528-3},
abstract = {Plant population density is an important factor for agricultural production systems due to its substantial influence on crop yield and quality. Traditionally, plant population density is estimated by using either field assessment or a germination-test-based approach. These approaches can be laborious and inaccurate. Recent advances in deep learning provide new tools to solve challenging computer vision tasks such as object detection, which can be used for detecting and counting plant seedlings in the field. The goal of this study was to develop a deep-learning-based approach to count plant seedlings in the field.},
language = {en},
number = {1},
urldate = {2024-10-08},
journal = {Plant Methods},
author = {Jiang, Yu and Li, Changying and Paterson, Andrew H. and Robertson, Jon S.},
month = nov,
year = {2019},
keywords = {Artificial Intelligence, Cotton, Faster RCNN, Object detection, Population density, Video tracking},
pages = {141},
}
@article{li_soybeannet_2024,
title = {{SoybeanNet}: {Transformer}-based convolutional neural network for soybean pod counting from {Unmanned} {Aerial} {Vehicle} ({UAV}) images},
volume = {220},
issn = {0168-1699},
shorttitle = {Li24},
url = {https://www.sciencedirect.com/science/article/pii/S0168169924002527},
doi = {10.1016/j.compag.2024.108861},
abstract = {Soybean is a critical source of food, protein, and oil, and thus has received extensive research aimed at enhancing their yield, refining cultivation practices, and advancing soybean breeding techniques. Within this context, soybean pod counting plays an essential role in understanding and optimizing production. Despite recent advancements, the development of a robust pod-counting algorithm capable of performing effectively in real-field conditions remains a significant challenge. This paper presents a pioneering work of accurate soybean pod counting utilizing unmanned aerial vehicle (UAV) images captured from actual soybean fields in Michigan, USA. Specifically, this paper presents SoybeanNet, a novel point-based counting network that harnesses powerful transformer backbones for simultaneous soybean pod counting and localization with high accuracy. In addition, a new dataset of UAV-acquired images for soybean pod counting was created and open-sourced, consisting of 113 drone images with more than 260k manually annotated soybean pods. The images are taken from an altitude of approximately 13 ft, with angles between 53 and 58 degrees, under natural lighting conditions. Through comprehensive evaluations, SoybeanNet demonstrates superior performance over five state-of-the-art approaches when tested on the collected images. Remarkably, SoybeanNet achieves a counting accuracy of 84.51\% when tested on the testing dataset, attesting to its efficacy in real-world scenarios. The publication also provides both the source code and the labeled soybean dataset, offering a valuable resource for future research endeavors in soybean pod counting and related fields.},
urldate = {2024-10-08},
journal = {Computers and Electronics in Agriculture},
author = {Li, Jiajia and Magar, Raju Thada and Chen, Dong and Lin, Feng and Wang, Dechun and Yin, Xiang and Zhuang, Weichao and Li, Zhaojian},
month = may,
year = {2024},
keywords = {Convolutional neural networks, Deep learning, Point-based crowd counting, Soybean pod counting, Soybean yield prediction, Transformer-based network},
pages = {108861},
}
@article{dang_yoloweeds_2023,
title = {{YOLOWeeds}: {A} novel benchmark of {YOLO} object detectors for multi-class weed detection in cotton production systems},
volume = {205},
issn = {0168-1699},
shorttitle = {Da23},
url = {https://www.sciencedirect.com/science/article/pii/S0168169923000431},
doi = {10.1016/j.compag.2023.107655},
abstract = {Weeds are among the major threats to cotton production. Overreliance on herbicides for weed control has accelerated the evolution of herbicide-resistance in weeds and caused increasing concerns about environments, food safety and human health. Machine vision systems for automated/robotic weeding have received growing interest towards the realization of integrated, sustainable weed management. However, in the presence of unstructured field environments and significant biological variability of weeds, it remains a serious challenge to develop reliable weed identification and detection systems. A promising solution to address this challenge is the development of large-scale, annotated image datasets of weeds specific to cropping systems and data-driven AI (artificial intelligence) models for weed detection. Among various deep learning architectures, a diversity of YOLO (You Only Look Once) detectors is well-suited for real-time application and has enjoyed great popularity for generic object detection. This study presents a new dataset (CottoWeedDet12) of weeds important to cotton production in the southern United States (U.S.); it consists of 5648 images of 12 weed classes with a total of 9370 bounding box annotations, collected under natural light conditions and at varied weed growth stages in cotton fields. A novel, comprehensive benchmark of 25 state-of-the-art YOLO object detectors of seven versions including YOLOv3, YOLOv4, Scaled-YOLOv4, YOLOR and YOLOv5, YOLOv6 and YOLOv7, has been established for weed detection on the dataset. Evaluated through Monte-Carlo cross validation with 5 replications, the detection accuracy in terms of mAP@0.5 ranged from 88.14 \% by YOLOv3-tiny to 95.22 \% by YOLOv4, and the accuracy in terms of mAP@[0.5:0.95] ranged from 68.18 \% by YOLOv3-tiny to 89.72 \% by Scaled-YOLOv4. All the YOLO models especially YOLOv5n and YOLOv5s have shown great potential for real-time weed detection, and data augmentation could increase weed detection accuracy. Both the weed detection dataset (https://doi.org/10.5281/zenodo.7535814) and software program codes for model benchmarking in this study are publicly available (https://github.com/DongChen06/DCW), which will be valuable resources for promoting future research on big data and AI-empowered weed detection and control for cotton and potentially other crops.},
urldate = {2024-10-08},
journal = {Computers and Electronics in Agriculture},
author = {Dang, Fengying and Chen, Dong and Lu, Yuzhen and Li, Zhaojian},
month = feb,
year = {2023},
keywords = {Cotton, Dataset, Deep learning, Machine vision, Precision agriculture, Weed detection},
pages = {107655},
}
@article{antony_rice_2023,
title = {Rice {Leaf} {Diseases} {Dataset}},
volume = {1},
shorttitle = {An23},
url = {https://data.mendeley.com/datasets/dwtn3c6w6p/1},
doi = {10.17632/dwtn3c6w6p.1},
abstract = {Overview: The Rice Life Disease Dataset is an extensive collection of data focused on three major diseases that affect rice plants: Bacterial Blight (BB), Brown Spot (BS), and Leaf Smut (LS). The dataset has been curated to assist researchers, agronomists, and machine learning practitioners in understanding, diagnosing, and potentially predicting the occurrence of these diseases, based on various attributes and parameters. Dataset Features: 1. Disease Type: This categorizes the observation into one of the three diseases: Bacterial Blight (BB), Brown Spot (BS), or Leaf Smut (LS). 2. Leaf Images: High-resolution images of rice leaves exhibiting symptoms of the specified disease. This aids in visual diagnosis and machine learning-based image recognition tasks. 3. Symptom Description: Textual description outlining the major symptoms visible on the leaf, offering a more detailed understanding of the disease's progression and manifestation. 4. Environmental Parameters: Data on temperature, humidity, and other weather conditions at the time of observation. This can help in understanding the environmental triggers for each disease. Potential Uses: 1. Disease Prediction and Early Detection: Machine learning models can be trained on this dataset to predict the likelihood of a rice plant contracting one of these diseases based on environmental and agronomic factors. 2. Disease Distribution Mapping: Understand the geographical spread and hotspots of these diseases. 3. Impact of Agronomic Practices: Determine which farming practices might contribute to or deter the spread of these diseases. 4. Image Recognition: Train machine learning models to automatically detect and classify these diseases from images of rice leaves.},
language = {en},
urldate = {2024-10-11},
author = {Antony, Lourdu and Prasanth, Leo},
month = oct,
year = {2023},
note = {Publisher: Mendeley Data},
}
@article{riehle_robust_2020,
title = {Robust index-based semantic plant/background segmentation for {RGB}-images},
volume = {169},
issn = {0168-1699},
shorttitle = {{RRG20}},
url = {https://www.sciencedirect.com/science/article/pii/S0168169919314346},
doi = {10.1016/j.compag.2019.105201},
abstract = {Plant/background segmentation is a key component of digital image analysis in agriculture. It can be used for yield prediction models, crop growth, disease diagnosis and automated navigation tasks. In particular, well-known segmentation methods are strongly influenced by changing light conditions during image acquisition as well as different color variations of plants and the background in the scene. In this work, an algorithm was developed and evaluated which can perform robust automated plant/background segmentation under varying capture conditions such as overexposure or underexposure along with diverse colors of the crop and background. The algorithm relies on an index-based method for approximate pre-segmentation and uses this first approximation to calculate the threshold value for a segmentation using a color space model. The performance of the algorithm was evaluated with 200 images from 4 different cameras and settings. Furthermore, the algorithm quality was compared with existing index-based segmentation methods such as the excess green index, the excess red index, excess green minus excess red index. All indices were combined with Otsu thresholding. The results showed that the novel approach has a more robust performance and is more reliable than the other index-based segmentation methods. The investigations showed excellent results of the algorithm using the CieLab color space with semantic plant segmentation accuracies of 97.4\%.},
urldate = {2024-10-09},
journal = {Computers and Electronics in Agriculture},
author = {Riehle, Daniel and Reiser, David and Griepentrog, Hans W.},
month = feb,
year = {2020},
keywords = {CieLab, Index-based segmentation, Otsu, Plant/soil segmentation, Semantic segmentation},
pages = {105201},
}
@article{bevers_soybean_2022,
title = {Soybean disease identification using original field images and transfer learning with convolutional neural networks},
volume = {203},
issn = {0168-1699},
shorttitle = {{BSH22}},
url = {https://www.sciencedirect.com/science/article/pii/S0168169922007578},
doi = {10.1016/j.compag.2022.107449},
abstract = {Meeting the growing demand for soybeans will require increased production. One approach would be to reduce yield loss from plant diseases. In the U.S., soybean diseases account for approximately 8–25\% of average annual yield loss. Early and accurate detection of pathogens is key for effective disease management strategies and can help to minimize pesticide usage and thus boost overall productivity. Recent advancements in computer vision could move us towards that goal by making disease diagnostics expertise more readily accessible to everyone. To that end, we developed an automated classifier of digital images of soybean diseases, based on convolutional neural networks (CNN). For model training and validation, we acquired more than 9,500 original soybean images, representing eight distinct disease and deficiency classes: (1) healthy/asymptomatic, (2) bacterial blight, (3) Cercospora leaf blight, (4) downy mildew, (5) frogeye leaf spot, (6) soybean rust, (7) target spot, and (8) potassium deficiency. To make training more efficient we experimented with a variety of approaches to transfer learning, data engineering, and data augmentation. Our best performing model was based on the DenseNet201 architecture. After training from scratch, it achieved an overall testing accuracy of 96.8\%. Experimenting with full or partial freezing of core DenseNet201 model weights did not improve performance. Neither did a deliberate effort to increase the diversity of subject backgrounds in the digital images. Models performed best when trained on datasets composed exclusively of images of soybean leaves still attached to the plant in the field; conversely, mixing in images of detached leaves on simple backgrounds reduced performance. On the other hand, data augmentation to increase representational parity across disease classes provided a substantial performance boost. Our development experience may provide useful insights for researchers considering how to best build and analyze datasets for similar applications.},
urldate = {2024-10-08},
journal = {Computers and Electronics in Agriculture},
author = {Bevers, Noah and Sikora, Edward J. and Hardy, Nate B.},
month = dec,
year = {2022},
keywords = {Augmentation, Convolution, Disease diagnostics, Image classification, Soybean, Transfer learning},
pages = {107449},
}
@article{rajput_soynet_2023,
title = {{SoyNet}: {A} high-resolution {Indian} soybean image dataset for leaf disease classification},
volume = {49},
shorttitle = {Ra23a},
url = {https://data.mendeley.com/datasets/w2r855hpx8/1},
doi = {10.17632/w2r855hpx8.1},
abstract = {High-quality images of soybean are required to solve soybean disease and healthy leaves classification and recognition problems. To build the machine learning models, deep learning models with neat and clean dataset is the elementary requirement in research. With this objective, this data set is created, which consists of healthy and disease-quality images of soybean named “SoyNet”. This dataset consists of 4500+ high-quality images of soybeans (healthy and Disease quality) with different angles and Images captured direct from the soybean agriculture field to analyze the real problem in research. The images are divided into 2 sub-folders 1) Raw SoyNet Data and 2) Pre-processing SoyNet Data. Each Sub folder contains a digital camera Click, which contains healthy and disease image folders, and 2) Mobile Phone Click, which contains disease images. The Pre-processing SoyNet Data contains folders of 256*256 resized images and grayscale images in a similar manner to disease and healthy data. A Digital Camera and a Mobile phone with a high-end resolution camera were used to capture the images. The images were taken at the soybean cultivation field in different lighting conditions and backgrounds. The proposed dataset can be used for training, testing, and validation of soybean classification or reorganization models.},
language = {en},
urldate = {2024-10-14},
journal = {Data in Brief},
author = {Rajput, Arpan Singh and Shukla, Shailja and Thakur, Samajh Singh},
month = may,
year = {2023},
note = {Publisher: Mendeley Data},
pages = {109447},
}
@article{kitzler_we3ds_2023,
title = {{WE3DS}: {An} {RGB}-{D} {Image} {Dataset} for {Semantic} {Segmentation} in {Agriculture}},
volume = {23},
issn = {14248220},
shorttitle = {Ki23},
url = {https://www.mendeley.com/catalogue/6873cb53-b2e2-34e7-8b49-5d36b1c51fbe/},
doi = {10.3390/s23052713},
abstract = {Smart farming (SF) applications rely on robust and accurate computer vision systems. An important computer vision task in agriculture is semantic segmentation, which ...},
language = {en-GB},
number = {5},
urldate = {2024-10-14},
journal = {Sensors},
author = {Kitzler, Florian and Barta, Norbert and Neugschwandtner, Reinhard W. and Gronauer, Andreas and Motsch, Viktoria},
year = {2023},
note = {Number: 5},
pages = {2713},
}
@article{olaniyi_maize-weed_2022,
title = {Maize-{Weed} {Image} {Dataset}},
volume = {1},
shorttitle = {Ol22},
url = {https://data.mendeley.com/datasets/jjbfcckrsp/1},
doi = {10.17632/jjbfcckrsp.1},
abstract = {The dataset contains images of maize plants and weed species. The dataset contains 36874 images in total and stored in three folders namely Dry Season, Wet Season and Annotated. The Dry Season contains 18187 images captured during dry season farm survey,the Wet Season contains 18187 images captured during wet season farm survey and the Annotated contains 500 annotated images selected from the Dry Season survey saved in JSON,XML and txt format. The annotated was achieved using the Labelmg suite. The raw wet and dry seasons images have been captured using a high-resolution digital camera during the weed survey, while the annotation of the annotated image was done using the Labelmg suite. A total of 18 farm locations were visited in North Central part of Nigeria for the data acquisition as part of an ongoing research effort on maize -weeds identification in farmlands.},
language = {en},
urldate = {2024-10-14},
author = {Olaniyi, Olayemi and Salaudeen, Muhammadu and Daniya, Emmanuel and Mohammed Abdullahi, Ibrahim and Folorunso, Taliha Abiodun and Bala, Jibril and Bello Kontagora, Nuhu and Adedigba, Adeyinka and Oluwole, Blessing and Macarthy, Odunayo and Bankole, Abdullah},
month = oct,
year = {2022},
note = {Publisher: Mendeley Data},
}
@article{rai_imageweeds_2023,
title = {{ImageWeeds}: {An} {Image} dataset consisting of weeds in multiple formats to advance computer vision algorithms for real-time weed identification and spot spraying application},
volume = {2},
shorttitle = {Ra23c},
url = {https://data.mendeley.com/datasets/8kjcztbjz2/2},
doi = {10.17632/8kjcztbjz2.2},
abstract = {Dataset consists of aerial and greenhouse images in multiple formats to advance computer vision algorithms for site-specific spraying applications.},
language = {en},
urldate = {2024-10-14},
author = {Rai, Nitin and Villamil Mahecha, Maria and Christensen, Annika and Quanbeck, Jamison and Howatt, Kirk and Ostlie, Michael and Zhang, Yu and Sun, Xin},
month = jun,
year = {2023},
note = {Publisher: Mendeley Data},
}
@article{moazzam_tobacco_2023,
title = {Tobacco {Aerial} {Dataset}},
volume = {1},
shorttitle = {Mo23},
url = {https://data.mendeley.com/datasets/5dpc5gbgpz/1},
doi = {10.17632/5dpc5gbgpz.1},
abstract = {A new aerial tobacco weed dataset with various field conditions. We have acquired a new tobacco-weed dataset using a Mavic Mini drone. Eight fields of tobacco crops are captured in Mardan, Khyber Pakhtunkhwa, Pakistan. At different growth stages, these eight fields are captured at a crop age of 15 to 40 days approximately. Images are captured at 1920 × 1080-pixel resolution; due to system memory limitations, we have cropped non-overlapping images of resolution 480 × 352 for processing. This image patch cropping is implemented using a code that reads images and creates non-overlapping tile images using two nested loops; the respective annotation images are also cropped simultaneously. Dataset is captured at an average altitude of 4 m with a ground sampling distance of 0.1 cm/pixel. Images are labeled manually; background, crop, and weed have label values of 0, 1, and 2, respectively. The folders contain original images and their labeled ground truths, along with binary vegetation masks; maskref subfolders show labelled data for visualization.},
language = {en},
urldate = {2024-10-14},
author = {Moazzam, Imran},
month = feb,
year = {2023},
note = {Publisher: Mendeley Data},
}
@misc{noauthor_20200701_nodate,
title = {20200701 - {Narrabri} {Chickpea} {BFLYS} {Dataset} - {Weed}-{AI}},
shorttitle = {We24f},
url = {https://weed-ai.sydney.edu.au/datasets/839a5f35-9c7b-4df3-92f4-d0fc15120920},
urldate = {2024-10-14},
}
@misc{noauthor_amsinckia_nodate,
title = {Amsinckia in chickpeas {Dataset} - {Weed}-{AI}},
shorttitle = {We24b},
url = {https://weed-ai.sydney.edu.au/datasets/21675efe-9d25-4096-be76-3a541475efd4},
urldate = {2024-10-14},
}
@misc{noauthor_20190729_nodate,
title = {20190729 - {Annual} {Ryegrass} and {Turnipweed} in {Wheat} {Dataset} - {Weed}-{AI}},
shorttitle = {We24c},
url = {https://weed-ai.sydney.edu.au/datasets/5158bbe5-6030-48ad-8214-b68ff8118c22},
urldate = {2024-10-14},
}
@misc{noauthor_broadleaf_nodate,
title = {Broadleaf {Weeds} in {Common} {Couch} {Dataset} - {Weed}-{AI}},
shorttitle = {We24d},
url = {https://weed-ai.sydney.edu.au/datasets/8b14a44b-bc7f-4b92-9bc0-224a2a2c4e22},
urldate = {2024-10-14},
}
@misc{noauthor_20200827_nodate,
title = {20200827 - {Cobbity} {Wheat} {BFLY} {Dataset} - {Weed}-{AI}},
shorttitle = {We24e},
url = {https://weed-ai.sydney.edu.au/datasets/3c363da3-6274-45e4-a0ce-b307cb0f89cc},
urldate = {2024-10-14},
}
@misc{noauthor_20190728_nodate,
title = {20190728 - {Narrabri} {Wheat} {Dataset} - {Weed}-{AI}},
shorttitle = {We24a},
url = {https://weed-ai.sydney.edu.au/datasets/dc322d80-be00-49cf-822c-9e9b40e37425},
urldate = {2024-10-14},
}
@misc{noauthor_20190712_nodate,
title = {20190712 - {Northern} {WA} {Wheatbelt} {Blue} {Lupins} {Dataset} - {Weed}-{AI}},
shorttitle = {We24g},
url = {https://weed-ai.sydney.edu.au/datasets/9df290f4-a29b-44b2-9de6-24bca1cee846},
urldate = {2024-10-14},
}
@misc{noauthor_wild_nodate,
title = {Wild carrot flowers in canola {Dataset} - {Weed}-{AI}},
shorttitle = {We24h},
url = {https://weed-ai.sydney.edu.au/datasets/c4a80379-afda-4972-b274-82a544addd0d},
urldate = {2024-10-14},
}
@misc{noauthor_20210317_nodate,
title = {20210317 - {Wild} radish in wheat {Dataset} - {Weed}-{AI}},
shorttitle = {We24i},
url = {https://weed-ai.sydney.edu.au/datasets/09af32ad-2e9e-4f7c-ae08-55374824ee15},
urldate = {2024-10-14},
}
@misc{noauthor_radishwheatdataset_nodate,
title = {{RadishWheatDataset} {Dataset} - {Weed}-{AI}},
shorttitle = {We24j},
url = {https://weed-ai.sydney.edu.au/datasets/8b8f134f-ede4-4792-b1f7-d38fc05d8127},
urldate = {2024-10-14},
}
@misc{noauthor_brownlow_nodate,
title = {Brownlow {Hill} {Fireweed} {Dataset} - {Weed}-{AI}},
shorttitle = {We24k},
url = {https://weed-ai.sydney.edu.au/datasets/24b34712-c31b-4efc-9790-406d1f14d840},
urldate = {2024-10-14},
}
@misc{noauthor_emerging_nodate,
title = {Emerging ryegrass seedlings in a fallow soil {Dataset} - {Weed}-{AI}},
shorttitle = {We24l},
url = {https://weed-ai.sydney.edu.au/datasets/c828f20d-9b3b-451a-b1a3-eb35398760da},
urldate = {2024-10-14},
}
@misc{noauthor_palmer_nodate,
title = {Palmer amaranth {Growth} {Stage} - 8 ({PAGS8}) - {Part} 1 {Dataset} - {Weed}-{AI}},
shorttitle = {We24m},
url = {https://weed-ai.sydney.edu.au/datasets/5c78d067-8750-4803-9cbe-57df8fae55e4},
urldate = {2024-10-14},
}
@article{rashid_pumpkin_2024,
title = {Pumpkin {Leaf} {Diseases} {Dataset} {From} {Bangladesh}},
volume = {1},
shorttitle = {{RBH24}},
url = {https://data.mendeley.com/datasets/wtxcw8wpxb/1},
doi = {10.17632/wtxcw8wpxb.1},
abstract = {The dataset comprises 2000 images, covering pumpkin diseases such as Downy Mildew, Powdery Mildew, Mosaic Disease, Bacterial Leaf Spot, and healthy leaves.},
language = {en},
urldate = {2024-10-14},
author = {Rashid, Mohammad Rifat Ahmmad and Biswas, Joy and Hossain, Md Miskat},
month = jun,
year = {2024},
note = {Publisher: Mendeley Data},
}
@misc{noauthor_ai-lab-makerereibean_2024,
title = {{AI}-{Lab}-{Makerere}/ibean},
copyright = {MIT},
shorttitle = {Ma20a},
url = {https://github.com/AI-Lab-Makerere/ibean},
abstract = {Data repo for the ibean project of the AIR lab.},
urldate = {2024-10-14},
month = jun,
year = {2024},
note = {original-date: 2020-01-28T10:04:47Z},
}
@misc{noauthor_corn_nodate,
title = {Corn {Leaf} {Infection} {Dataset}},
shorttitle = {Ac20},
url = {https://www.kaggle.com/datasets/qramkrishna/corn-leaf-infection-dataset},
abstract = {A corn leaf dataset taken from field to help agriculture sector.},
language = {en},
urldate = {2024-10-14},
}
@article{rahman_new_2024,
title = {A {New} {Dataset} and {Comparative} {Study} for {Aphid} {Cluster} {Detection} and {Segmentation} in {Sorghum} {Fields}},
volume = {10},
copyright = {http://creativecommons.org/licenses/by/3.0/},
issn = {2313-433X},
shorttitle = {Ra24},
url = {https://www.mdpi.com/2313-433X/10/5/114},
doi = {10.3390/jimaging10050114},
abstract = {Aphid infestations are one of the primary causes of extensive damage to wheat and sorghum fields and are one of the most common vectors for plant viruses, resulting in significant agricultural yield losses. To address this problem, farmers often employ the inefficient use of harmful chemical pesticides that have negative health and environmental impacts. As a result, a large amount of pesticide is wasted on areas without significant pest infestation. This brings to attention the urgent need for an intelligent autonomous system that can locate and spray sufficiently large infestations selectively within the complex crop canopies. We have developed a large multi-scale dataset for aphid cluster detection and segmentation, collected from actual sorghum fields and meticulously annotated to include clusters of aphids. Our dataset comprises a total of 54,742 image patches, showcasing a variety of viewpoints, diverse lighting conditions, and multiple scales, highlighting its effectiveness for real-world applications. In this study, we trained and evaluated four real-time semantic segmentation models and three object detection models specifically for aphid cluster segmentation and detection. Considering the balance between accuracy and efficiency, Fast-SCNN delivered the most effective segmentation results, achieving 80.46\% mean precision, 81.21\% mean recall, and 91.66 frames per second (FPS). For object detection, RT-DETR exhibited the best overall performance with a 61.63\% mean average precision (mAP), 92.6\% mean recall, and 72.55 on an NVIDIA V100 GPU. Our experiments further indicate that aphid cluster segmentation is more suitable for assessing aphid infestations than using detection models.},
language = {en},
number = {5},
urldate = {2024-10-30},
journal = {Journal of Imaging},
author = {Rahman, Raiyan and Indris, Christopher and Bramesfeld, Goetz and Zhang, Tianxiao and Li, Kaidong and Chen, Xiangyu and Grijalva, Ivan and McCornack, Brian and Flippo, Daniel and Sharda, Ajay and Wang, Guanghui},
month = may,
year = {2024},
note = {Number: 5; Publisher: Multidisciplinary Digital Publishing Institute},
keywords = {aphid cluster, detection, multi-scale dataset, real time, segmentation},
pages = {114},
}
@article{guldenring_zoom_2024,
title = {Zoom in on the {Plant}: {Fine}-{Grained} {Analysis} of {Leaf}, {Stem}, and {Vein} {Instances}},
volume = {9},
issn = {2377-3766},
shorttitle = {{GAN23}},
url = {https://ieeexplore.ieee.org/abstract/document/10373101?casa_token=Li4B3hSYtBEAAAAA:YFrVa8i2lnXzWilG4PukLWD884-vKiVq7MbuvIaQiMNku4Fvpfquzwu1Qk_xPkB-kd2ras1WWgTt},
doi = {10.1109/LRA.2023.3346807},
abstract = {Robot perception is far from what humans are capable of. Humans do not only have a complex semantic scene understanding but also extract fine-grained intra-object properties for the salient ones. When humans look at plants, they naturally perceive the plant architecture with its individual leaves and branching system. In this work, we want to advance the granularity in plant understanding for agricultural precision robots. We develop a model to extract fine-grained phenotypic information, such as leaf-, stem-, and vein instances. The underlying dataset RumexLeaves is made publicly available and is the first of its kind with keypoint-guided polyline annotations leading along the line from the lowest stem point along the leaf basal to the leaf apex. Furthermore, we introduce an adapted metric POKS complying with the concept of keypoint-guided polylines. In our experimental evaluation, we provide baseline results for our newly introduced dataset while showcasing the benefits of POKS over OKS.},
number = {2},
urldate = {2024-10-30},
journal = {IEEE Robotics and Automation Letters},
author = {Güldenring, Ronja and Andersen, Rasmus Eckholdt and Nalpantidis, Lazaros},
month = feb,
year = {2024},
note = {Conference Name: IEEE Robotics and Automation Letters},
keywords = {Annotations, Crops, Feature extraction, field robots, Grasslands, image dataset, keypoint-guided polylines, Measurement, phenotyping, Robotics and automation in agriculture and forestry, Robots, Task analysis},
pages = {1588--1595},
}
@article{david_global_2021,
title = {Global {Wheat} {Head} {Dataset} 2021: an update to improve the benchmarking wheat head localization with more diversity},
shorttitle = {Da21},
url = {https://openreview.net/forum?id=fEoYkscKoS},
abstract = {The Global Wheat Head Detection (GWHD) dataset was created in 2020 and has assembled 193,634 labelled wheat heads from 4,700 RGB images acquired from various acquisition platforms and 7 countries/institutions. With an associated competition hosted in Kaggle, GWHD has successfully attracted attention from both the computer vision and agricultural science communities. From this first experience in 2020, a few avenues for improvements have been identified, especially from the perspective of data size, head diversity and label reliability. To address these issues, the 2020 dataset has been reexamined, relabeled, and augmented by adding 1,722 images from 5 additional countries, allowing for 81,553 additional wheat heads to be added. We now release a new version of the Global Wheat Head Detection (GWHD) dataset in 2021, which is bigger, more diverse, and less noisy than the 2020 version. The GWHD 2021 is now publicly available at http://www.global-wheat.com/ and a new data challenge has been organized on AIcrowd to make use of this updated dataset.},
language = {en},
urldate = {2024-10-30},
journal = {CoRR},
author = {David, Etienne and Serouart, Mario and Smith, Daniel and Madec, Simon and Velumani, Kaaviya and Liu, Shouyang and Wang, Xu and Espinosa, Francisco Pinto and Shafiee, Shahameh and Tahir, Izzat S. A. and Tsujimoto, Hisashi and Nasuda, Shuhei and Zheng, Bangyou and Kirchgessner, Norbert and Aasen, Helge and Hund, Andreas and Sadeghi-Tehran, Pouria and Nagasawa, Koichi and Ishikawa, Goro and Dandrifosse, Sébastien and Carlier, Alexis and Mercatoris, Benoit and Kuroki, Ken and Wang, Haozhou and Ishii, Masanori and Badhon, Minhajul A. and Pozniak, Curtis and LeBauer, David Shaner and Lillemo, Morten and Poland, Jesse and Chapman, Scott C. and Solan, Benoit de and Baret, Frédéric and Stavness, Ian and Guo, Wei},
month = jan,
year = {2021},
}
@article{yordanov_crop_2023,
title = {Crop {Identification} {Using} {Deep} {Learning} on {LUCAS} {Crop} {Cover} {Photos}},
volume = {23},
copyright = {http://creativecommons.org/licenses/by/3.0/},
issn = {1424-8220},
shorttitle = {Yo23},
url = {https://www.mdpi.com/1424-8220/23/14/6298},
doi = {10.3390/s23146298},
abstract = {Massive and high-quality in situ data are essential for Earth-observation-based agricultural monitoring. However, field surveying requires considerable organizational effort and money. Using computer vision to recognize crop types on geo-tagged photos could be a game changer allowing for the provision of timely and accurate crop-specific information. This study presents the first use of the largest multi-year set of labelled close-up in situ photos systematically collected across the European Union from the Land Use Cover Area frame Survey (LUCAS). Benefiting from this unique in situ dataset, this study aims to benchmark and test computer vision models to recognize major crops on close-up photos statistically distributed spatially and through time between 2006 and 2018 in a practical agricultural policy relevant context. The methodology makes use of crop calendars from various sources to ascertain the mature stage of the crop, of an extensive paradigm for the hyper-parameterization of MobileNet from random parameter initialization, and of various techniques from information theory in order to carry out more accurate post-processing filtering on results. The work has produced a dataset of 169,460 images of mature crops for the 12 classes, out of which 15,876 were manually selected as representing a clean sample without any foreign objects or unfavorable conditions. The best-performing model achieved a macro F1 (M-F1) of 0.75 on an imbalanced test dataset of 8642 photos. Using metrics from information theory, namely the equivalence reference probability, resulted in an increase of 6\%. The most unfavorable conditions for taking such images, across all crop classes, were found to be too early or late in the season. The proposed methodology shows the possibility of using minimal auxiliary data outside the images themselves in order to achieve an M-F1 of 0.82 for labelling between 12 major European crops.},
language = {en},
number = {14},
urldate = {2024-10-30},
journal = {Sensors},
author = {Yordanov, Momchil and d’Andrimont, Raphaël and Martinez-Sanchez, Laura and Lemoine, Guido and Fasbender, Dominique and van der Velde, Marijn},
month = jan,
year = {2023},
note = {Number: 14; Publisher: Multidisciplinary Digital Publishing Institute},
keywords = {agriculture, computer vision, data valorization, deep learning, image classification algorithms, mapping from imagery, plant recognition},
pages = {6298},
}
@article{petchiammal_paddy_2023,
title = {Paddy {Doctor}: {A} {Visual} {Image} {Dataset} for {Automated} {Paddy} {Disease} {Classification} and {Benchmarking}},
shorttitle = {Pe23},
url = {https://dl.acm.org/doi/10.1145/3570991.3570994},
doi = {10.1145/3570991.3570994},
abstract = {One of the critical biotic stress factors paddy farmers face is diseases caused by bacteria, fungi, and other organisms. These diseases affect plants’ health severely and lead to significant crop loss. Most of these diseases can be identified by regularly observing the leaves and stems under expert supervision. In a country with vast agricultural regions and limited crop protection experts, manual identification of paddy diseases is challenging. Thus, to add a solution to this problem, it is necessary to automate the disease identification process and provide easily accessible decision support tools to enable effective crop protection measures. However, the lack of availability of public datasets with detailed disease information limits the practical implementation of accurate disease detection systems. This paper presents Paddy Doctor, a visual image dataset for identifying paddy diseases. Our dataset contains 16,225 annotated paddy leaf images across 13 classes (12 diseases and normal leaf). We benchmarked the Paddy Doctor dataset using a Convolutional Neural Network (CNN) and four transfer learning based models (VGG16, MobileNet, Xception, and ResNet34). The experimental results showed that ResNet34 achieved the highest F1-score of 97.50\%. We release our dataset and reproducible code in the open source for community use.},
language = {en},
urldate = {2024-10-30},
booktitle = {Proceedings of the 6th Joint International Conference on Data Science \& Management of Data (10th ACM IKDD CODS and 28th COMAD)},
author = {{Petchiammal} and Kiruba, Briskline and {Murugan} and Arjunan, Pandarasamy},
month = jan,
year = {2023},
isbn = {9781450397971},
address = {Mumbai, India},
publisher = {ACM},
pages = {203--207},
}
@misc{giselsson_public_2017,
title = {A {Public} {Image} {Database} for {Benchmark} of {Plant} {Seedling} {Classification} {Algorithms}},
shorttitle = {Gi17},
url = {http://arxiv.org/abs/1711.05458},
doi = {10.48550/arXiv.1711.05458},
abstract = {A database of images of approximately 960 unique plants belonging to 12 species at several growth stages is made publicly available. It comprises annotated RGB images with a physical resolution of roughly 10 pixels per mm. To standardise the evaluation of classification results obtained with the database, a benchmark based on $f_{1}$ scores is proposed. The dataset is available at https://vision.eng.au.dk/plant-seedlings-dataset},
urldate = {2024-10-30},
publisher = {arXiv},
author = {Giselsson, Thomas Mosgaard and Jørgensen, Rasmus Nyholm and Jensen, Peter Kryger and Dyrmann, Mads and Midtiby, Henrik Skov},
month = nov,
year = {2017},
note = {arXiv:1711.05458},
keywords = {Computer Science - Computer Vision and Pattern Recognition},
}
@article{rashid_comprehensive_2023,
title = {Comprehensive dataset of annotated rice panicle image from {Bangladesh}},
volume = {51},
issn = {2352-3409},
shorttitle = {Ra23b},
url = {https://www.sciencedirect.com/science/article/pii/S2352340923008399},
doi = {10.1016/j.dib.2023.109772},
abstract = {Bangladesh's economy is primarily driven by the agriculture sector. Rice is one of the staple foods of Bangladesh. The count of panicles per unit area serves as a widely used indicator for estimating rice yield, facilitating breeding efforts, and conducting phenotypic analysis. By calculating the number of panicles within a given area, researchers and farmers can assess crop density, plant health, and prospective production. The conventional method of estimating rice yields in Bangladesh is time-consuming, inaccurate, and inefficient. To address the challenge of detecting rice panicles, this article provides a comprehensive dataset of annotated rice panicle images from Bangladesh. Data collection was done by a drone equipped with a 4K resolution camera, and it took place on April 25, 2023, in Bonkhoria Gazipur, Bangladesh. During the day, the drone captured the rice field from various heights and perspectives. After employing various image processing techniques for curation and annotation, the dataset was generated using images extracted from drone video clips, which were then annotated with information regarding rice panicles. The dataset is the largest publicly accessible collection of rice panicle images from Bangladesh, consisting of 2193 original images and 5701 augmented images.},
urldate = {2024-10-30},
journal = {Data in Brief},
author = {Rashid, Mohammad Rifat Ahmmad and Hossain, Md. Shafayat and Fahim, MD and Islam, Md. Shajibul and {Tahzib-E-Alindo} and Prito, Rizvee Hassan and Sheikh, Md. Shahadat Anik and Ali, Md Sawkat and Hasan, Mahamudul and Islam, Maheen},
month = dec,
year = {2023},
keywords = {Annotated image, Computer vision, Crop yield estimation, Object detection, Rice panicle},
pages = {109772},
}
@article{teimouri_novel_2022,
title = {Novel {Assessment} of {Region}-{Based} {CNNs} for {Detecting} {Monocot}/{Dicot} {Weeds} in {Dense} {Field} {Environments}},
volume = {12},
copyright = {http://creativecommons.org/licenses/by/3.0/},
issn = {2073-4395},
shorttitle = {TJG22},
url = {https://www.mdpi.com/2073-4395/12/5/1167},
doi = {10.3390/agronomy12051167},
abstract = {Weeding operations represent an effective approach to increase crop yields. Reliable and precise weed detection is a prerequisite for achieving high-precision weed monitoring and control in precision agriculture. To develop an effective approach for detecting weeds within the red, green, and blue (RGB) images, two state-of-the-art object detection models, EfficientDet (coefficient 3) and YOLOv5m, were trained on more than 26,000 in situ labeled images with monocot/dicot classes recorded from more than 200 different fields in Denmark. The dataset was collected using a high velocity camera (HVCAM) equipped with a xenon ring flash that overrules the sunlight and minimizes shadows, which enables the camera to record images with a horizontal velocity of over 50 km h$^{-1}$. Software-wise, a novel image processing algorithm was developed and utilized to generate synthetic images for testing the model performance on some difficult occluded images with weeds that were properly generated using the proposed algorithm. Both deep-learning networks were trained on in-situ images and then evaluated on both synthetic and new unseen in-situ images to assess their performances. The obtained average precision (AP) of both EfficientDet and YOLOv5 models on 6625 synthetic images were 64.27\% and 63.23\%, respectively, for the monocot class and 45.96\% and 37.11\% for the dicot class. These results confirmed that both deep-learning networks could detect weeds with high performance. However, it is essential to verify both models’ robustness on in-situ images in which there is heavy occlusion with a complicated background. Therefore, 1149 in-field images were recorded in 5 different fields in Denmark and then utilized to evaluate both proposed models’ robustness. In the next step, by running both models on 1149 in-situ images, the monocot/dicot AP obtained for the EfficientDet and YOLOv5 models was 27.43\%/42.91\% and 30.70\%/51.50\%, respectively. Furthermore, this paper provides information regarding challenges of monocot/dicot weed detection by releasing 1149 in situ test images with their corresponding labels (RoboWeedMap) publicly to facilitate the research in the weed detection domain within the precision agriculture field.},
language = {en},
number = {5},
urldate = {2024-10-25},
journal = {Agronomy},
author = {Teimouri, Nima and Jørgensen, Rasmus Nyholm and Green, Ole},
month = may,
year = {2022},
note = {Number: 5
Publisher: Multidisciplinary Digital Publishing Institute},
keywords = {computer vision, deep learning, synthetic image, weed detection},
pages = {1167},
}
@article{lammie_low-power_2019,
title = {Low-{Power} and {High}-{Speed} {Deep} {FPGA} {Inference} {Engines} for {Weed} {Classification} at the {Edge}},
volume = {7},
issn = {2169-3536},
shorttitle = {La19},
url = {https://ieeexplore.ieee.org/abstract/document/8693488},
doi = {10.1109/ACCESS.2019.2911709},
abstract = {Deep neural networks (DNNs) have recently achieved remarkable performance in a myriad of applications, ranging from image recognition to language processing. Training such networks on graphics processing units (GPUs) currently offers unmatched levels of performance; however, GPUs are subject to large-power requirements. With recent advancements in high-level synthesis (HLS) techniques, new methods for accelerating deep networks using field programmable gate arrays (FPGAs) are emerging. FPGA-based DNNs present substantial advantages in energy efficiency over conventional CPU- and GPU-accelerated networks. Using the Intel FPGA software development kit (SDK) for OpenCL development environment, networks described using the high-level OpenCL framework can be accelerated targeting heterogeneous platforms including CPUs, GPUs, and FPGAs. These networks, if properly customized on GPUs and FPGAs, can be ideal candidates for learning and inference in resource-constrained portable devices such as robots and the Internet of Things (IoT) edge devices, where power is limited and performance is critical. Here, we introduce GPU- and FPGA-accelerated deterministically binarized DNNs, tailored toward weed species classification for robotic weed control. Our developed networks are trained and benchmarked using a publicly available weed species dataset, named DeepWeeds, which includes close to 18,000 weed images. We demonstrate that our FPGA-accelerated binarized networks significantly outperform their GPU-accelerated counterparts, achieving a {\textgreater}7-fold decrease in power consumption, while performing inference on weed images 2.86 times faster compared to our best-performing baseline full-precision GPU implementation. These significant benefits are gained whilst losing only 1.17\% of validation accuracy. This is a significant step toward enabling deep inference and learning on IoT edge devices and smart portable machines such as agricultural robots, which are the target applications.},
urldate = {2024-10-29},
journal = {IEEE Access},
author = {Lammie, Corey and Olsen, Alex and Carrick, Tony and Rahimi Azghadi, Mostafa},
year = {2019},
keywords = {Acceleration, binarized neural networks (BNNs), convolutional neural networks (CNNs), deep neural networks (DNNs), Engines, Field programmable gate arrays, field programmable gate arrays (FPGAs), Graphics processing units, high-level synthesis (HLS), Internet of Things (IoT), Machine learning (ML), Robot kinematics, Training, weed classification},
pages = {51171--51184},
}
@article{chen_performance_2022,
title = {Performance evaluation of deep transfer learning on multi-class identification of common weed species in cotton production systems},
volume = {198},
issn = {0168-1699},
shorttitle = {Ch23},
url = {https://www.sciencedirect.com/science/article/pii/S0168169922004082},
doi = {10.1016/j.compag.2022.107091},
abstract = {Precision weed management offers a promising solution for sustainable cropping systems through the use of chemical-reduced/non-chemical robotic weeding techniques, which apply suitable control tactics to individual weeds or small clusters. Therefore, accurate identification of weed species plays a crucial role in such systems to enable precise, individualized weed treatment. Despite recent progress, the development of a robust weed identification and localization system in the presence of unstructured field environments remains a serious challenge, requiring supervised modeling using large volumes of annotated data. This paper makes a first comprehensive evaluation of deep transfer learning (DTL) for identifying common weed species specific to cotton (Gossypium hirsutum L.) production systems in southern United States (U.S.). A new dataset for weed identification was created, consisting of 5187 color images of 15 weed classes collected under natural light conditions and at varied weed growth stages, in cotton fields (primarily in Mississippi and North Carolina) during the 2020 and 2021 growth seasons. We evaluated 35 state-of-the-art deep learning models through transfer learning with repeated holdout validations and established an extensive benchmark for the considered weed identification task. DTL achieved high classification accuracy, with F1 scores exceeding 95\%, requiring reasonably short training time (less than 2.5 h) across models. ResNeXt101 achieved the best overall F1-score of 98.93 ± 0.34\%, whereas 10 out of the 35 models achieved F1 scores near or above 98.0\%. However, the performance on minority weed classes with few training samples was less satisfactory for models trained with a conventional, unweighted cross entropy loss function. To address this issue, a weighted cross entropy loss function was adopted, which achieved substantially improved accuracies for minority weed classes (e.g., the F1-scores for Xception and MnasNet on the Spurred Anoda weed increased from 48\% to 90\% and 50\% to 82\%, respectively). Furthermore, a deep learning-based cosine similarity metric was employed to analyze the similarity among weed classes, assisting in the interpretation of classifications. Both the codes (https://github.com/Derekabc/CottonWeeds) for model benchmarking and the weed dataset (https://www.kaggle.com/yuzhenlu/cottonweedid15) of this study are made publicly available, which are expected to be a valuable resource for future research on weed identification and beyond.},
urldate = {2024-10-24},
journal = {Computers and Electronics in Agriculture},
author = {Chen, Dong and Lu, Yuzhen and Li, Zhaojian and Young, Sierra},
month = jul,
year = {2022},
keywords = {Computer vision, Cotton, Deep transfer learning, Image classification, Weed management},
pages = {107091},
}