diff --git a/content/authors/alumnus/_index.md b/content/authors/alumnus/_index.md
index 6d11b00..20642e7 100644
--- a/content/authors/alumnus/_index.md
+++ b/content/authors/alumnus/_index.md
@@ -26,6 +26,5 @@
 highlight_name: false
 user_groups:
 - Alumni
----
-
+---
 An alumnus
\ No newline at end of file
diff --git a/publications.bib b/publications.bib
new file mode 100644
index 0000000..3a8ab8a
--- /dev/null
+++ b/publications.bib
@@ -0,0 +1,27 @@
+@inproceedings{huang-etal-2022-lightweight,
+    title = "Lightweight Contextual Logical Structure Recovery",
+    author = "Huang, Po-Wei  and
+      Ramesh Kashyap, Abhinav  and
+      Qin, Yanxia  and
+      Yang, Yajing  and
+      Kan, Min-Yen",
+    editor = "Cohan, Arman  and
+      Feigenblat, Guy  and
+      Freitag, Dayne  and
+      Ghosal, Tirthankar  and
+      Herrmannova, Drahomira  and
+      Knoth, Petr  and
+      Lo, Kyle  and
+      Mayr, Philipp  and
+      Shmueli-Scheuer, Michal  and
+      de Waard, Anita  and
+      Wang, Lucy Lu",
+    booktitle = "Proceedings of the Third Workshop on Scholarly Document Processing",
+    month = oct,
+    year = "2022",
+    address = "Gyeongju, Republic of Korea",
+    publisher = "Association for Computational Linguistics",
+    url = "https://aclanthology.org/2022.sdp-1.5",
+    pages = "37--48",
+    abstract = "Logical structure recovery in scientific articles associates text with a semantic section of the article. Although previous work has disregarded the surrounding context of a line, we model this important information by employing line-level attention on top of a transformer-based scientific document processing pipeline. With the addition of loss function engineering and data augmentation techniques with semi-supervised learning, our method improves classification performance by 10{\%} compared to a recent state-of-the-art model. Our parsimonious, text-only method achieves a performance comparable to that of other works that use rich document features such as font and spatial position, using less data without sacrificing performance, resulting in a lightweight training pipeline.",
+}