diff --git a/_bibliography/papers.bib b/_bibliography/papers.bib
index 86b8a1d..642f3a1 100644
--- a/_bibliography/papers.bib
+++ b/_bibliography/papers.bib
@@ -1,23 +1,7 @@
 ---
 ---
 
-@article{Decentralized_SOCO_SIGMETRICS_2025,
- abbr={SIGMETRICS},
- title={Learning-Augmented Decentralized Online Convex Optimization in Networks},
- author={Pengfei Li and Jianyi Yang and Adam Wierman and Shaolei Ren},
- abstract={This paper studies learning-augmented decentralized online convex optimization in a networked multi-agent system, a challenging setting that has remained under-explored. We first consider a linear learning-augmented decentralized online algorithm (LADO-Lin) that combines a machine learning (ML) policy with a baseline expert policy in a linear manner. We show that, while LADO-Lin can exploit the potential of ML predictions to improve the average cost performance, it cannot have guaranteed worst-case performance. To address this limitation, we propose a novel online algorithm (LADO) that adaptively combines the ML policy and expert policy to safeguard the ML predictions to achieve strong competitiveness guarantees. We also prove the average cost bound for LADO, revealing the tradeoff between average performance and worst-case robustness and demonstrating the advantage of training the ML policy by explicitly considering the robustness requirement. Finally, we run an experiment on decentralized battery management for sustainable computing. Our results highlight the potential of ML augmentation to improve the average performance as well as the guaranteed worst-case performance of LADO.},
- journal={SIGMETRICS (preprint available and pending final reivsion)},
- month = {},
- year={2025},
- url={https://arxiv.org/abs/2306.10158},
- html={https://arxiv.org/abs/2306.10158},
- bibtex_show = {true},
- selected={true},
- recenthighlights={true},
- topic = {green},
- timerange = {21to25},
- show = {true}
-}
+
 
 @article{AI_Water_CACM_2024,
  abbr={CACM},
@@ -37,6 +21,24 @@ @article{AI_Water_CACM_2024
  show = {true}
 }
 
+@article{Decentralized_SOCO_SIGMETRICS_2025,
+ abbr={SIGMETRICS},
+ title={Learning-Augmented Decentralized Online Convex Optimization in Networks},
+ author={Pengfei Li and Jianyi Yang and Adam Wierman and Shaolei Ren},
+ abstract={This paper studies learning-augmented decentralized online convex optimization in a networked multi-agent system, a challenging setting that has remained under-explored. We first consider a linear learning-augmented decentralized online algorithm (LADO-Lin) that combines a machine learning (ML) policy with a baseline expert policy in a linear manner. We show that, while LADO-Lin can exploit the potential of ML predictions to improve the average cost performance, it cannot have guaranteed worst-case performance. To address this limitation, we propose a novel online algorithm (LADO) that adaptively combines the ML policy and expert policy to safeguard the ML predictions to achieve strong competitiveness guarantees. We also prove the average cost bound for LADO, revealing the tradeoff between average performance and worst-case robustness and demonstrating the advantage of training the ML policy by explicitly considering the robustness requirement. Finally, we run an experiment on decentralized battery management for sustainable computing. Our results highlight the potential of ML augmentation to improve the average performance as well as the guaranteed worst-case performance of LADO.},
+ journal={SIGMETRICS (preprint available and pending final revision)},
+ month = {},
+ year={2025},
+ url={https://arxiv.org/abs/2306.10158},
+ html={https://arxiv.org/abs/2306.10158},
+ bibtex_show = {true},
+ selected={true},
+ recenthighlights={true},
+ topic = {green},
+ timerange = {21to25},
+ show = {true}
+}
+
 @article{LLM_Watermark_Spoofing_NeurIPS_2024,
  abbr={NeurIPS},
  title={Bileve: Securing Text Provenance in Large Language Models Against Spoofing with Bi-level Signature},