From f2553c65b3176afb4f1f022ac257c5f079c568a8 Mon Sep 17 00:00:00 2001 From: Shaolei Ren <74640564+shaoleiren@users.noreply.github.com> Date: Fri, 8 Nov 2024 10:22:35 -0800 Subject: [PATCH] Update papers.bib --- _bibliography/papers.bib | 14 +++++++------- 1 file changed, 7 insertions(+), 7 deletions(-) diff --git a/_bibliography/papers.bib b/_bibliography/papers.bib index 642f3a1..e739a7e 100644 --- a/_bibliography/papers.bib +++ b/_bibliography/papers.bib @@ -26,7 +26,7 @@ @article{Decentralized_SOCO_SIGMETRICS_2025 title={Learning-Augmented Decentralized Online Convex Optimization in Networks}, author={Pengfei Li and Jianyi Yang and Adam Wierman and Shaolei Ren}, abstract={This paper studies learning-augmented decentralized online convex optimization in a networked multi-agent system, a challenging setting that has remained under-explored. We first consider a linear learning-augmented decentralized online algorithm (LADO-Lin) that combines a machine learning (ML) policy with a baseline expert policy in a linear manner. We show that, while LADO-Lin can exploit the potential of ML predictions to improve the average cost performance, it cannot have guaranteed worst-case performance. To address this limitation, we propose a novel online algorithm (LADO) that adaptively combines the ML policy and expert policy to safeguard the ML predictions to achieve strong competitiveness guarantees. We also prove the average cost bound for LADO, revealing the tradeoff between average performance and worst-case robustness and demonstrating the advantage of training the ML policy by explicitly considering the robustness requirement. Finally, we run an experiment on decentralized battery management for sustainable computing. 
Our results highlight the potential of ML augmentation to improve the average performance as well as the guaranteed worst-case performance of LADO.}, - journal={SIGMETRICS (preprint available and pending final reivsion)}, + journal={SIGMETRICS}, month = {}, year={2025}, url={https://arxiv.org/abs/2306.10158}, @@ -44,7 +44,7 @@ @article{LLM_Watermark_Spoofing_NeurIPS_2024 title={Bileve: Securing Text Provenance in Large Language Models Against Spoofing with Bi-level Signature}, author={Tong Zhou and Xuandong Zhao and Xiaolin Xu and Shaolei Ren}, abstract={Text watermarks for large language models (LLMs) have been commonly used to identify the origins of machine-generated content, which is promising for assessing liability when combating deepfake or harmful content. While existing watermarking techniques typically prioritize robustness against removal attacks, unfortunately, they are vulnerable to spoofing attacks: malicious actors can subtly alter the meanings of LLM-generated responses or even forge harmful content, potentially misattributing blame to the LLM developer. To overcome this, we introduce a bi-level signature scheme, Bileve, which embeds fine-grained signature bits for integrity checks (mitigating spoofing attacks) as well as a coarse-grained signal to trace text sources when the signature is invalid (enhancing detectability) via a novel rank-based sampling strategy. Compared to conventional watermark detectors that only output binary results, Bileve can differentiate 5 scenarios during detection, reliably tracing text provenance and regulating LLMs. 
The experiments conducted on OPT-1.3B and LLaMA-7B demonstrate the effectiveness of Bileve in defeating spoofing attacks with enhanced detectability.}, - journal={NeruIPS (preprint available and pending final reivsion)}, + journal={NeurIPS}, month = {}, year={2024}, url={https://arxiv.org/abs/2406.01946}, @@ -105,8 +105,8 @@ @article{OnlineBudgetedMatching_General_NeurIPS_2024 journal={NeurIPS}, month = {}, year={2024}, - url={https://shaoleiren.github.io/}, - html={https://shaoleiren.github.io/}, + url={https://arxiv.org/abs/2411.04204}, + html={https://arxiv.org/abs/2411.04204}, bibtex_show = {true}, selected={true}, recenthighlights={true}, @@ -116,7 +116,7 @@ @article{OnlineBudgetedMatching_General_NeurIPS_2024 } -@article{OnlineBudgetedMatching_General_NeurIPS_2024, +@article{SafePlay_Games_Learning_NeurIPS_2024, abbr={NeurIPS}, title={Safe Exploitative Play in Stochastic Bayesian Games with Untrusted Type Beliefs}, author={Tongxin Li and Tinashe Handina and Shaolei Ren and Adam Wierman}, @@ -124,8 +124,8 @@ @article{OnlineBudgetedMatching_General_NeurIPS_2024 journal={NeurIPS}, month = {}, year={2024}, - url={https://shaoleiren.github.io/}, - html={https://shaoleiren.github.io/}, + url={https://openreview.net/forum?id=QZtJ22aOV4}, + html={https://openreview.net/forum?id=QZtJ22aOV4}, bibtex_show = {true}, selected={true}, recenthighlights={false},