Skip to content

Commit

Permalink
Update papers.bib
Browse files Browse the repository at this point in the history
  • Loading branch information
shaoleiren committed Nov 8, 2024
1 parent 1b4ef9b commit f2553c6
Showing 1 changed file with 7 additions and 7 deletions.
14 changes: 7 additions & 7 deletions _bibliography/papers.bib
Original file line number Diff line number Diff line change
Expand Up @@ -26,7 +26,7 @@ @article{Decentralized_SOCO_SIGMETRICS_2025
title={Learning-Augmented Decentralized Online Convex Optimization in Networks},
author={Pengfei Li and Jianyi Yang and Adam Wierman and Shaolei Ren},
abstract={This paper studies learning-augmented decentralized online convex optimization in a networked multi-agent system, a challenging setting that has remained under-explored. We first consider a linear learning-augmented decentralized online algorithm (LADO-Lin) that combines a machine learning (ML) policy with a baseline expert policy in a linear manner. We show that, while LADO-Lin can exploit the potential of ML predictions to improve the average cost performance, it cannot have guaranteed worst-case performance. To address this limitation, we propose a novel online algorithm (LADO) that adaptively combines the ML policy and expert policy to safeguard the ML predictions to achieve strong competitiveness guarantees. We also prove the average cost bound for LADO, revealing the tradeoff between average performance and worst-case robustness and demonstrating the advantage of training the ML policy by explicitly considering the robustness requirement. Finally, we run an experiment on decentralized battery management for sustainable computing. Our results highlight the potential of ML augmentation to improve the average performance as well as the guaranteed worst-case performance of LADO.},
journal={SIGMETRICS (preprint available and pending final revision)},
journal={SIGMETRICS},
month = {},
year={2025},
url={https://arxiv.org/abs/2306.10158},
Expand All @@ -44,7 +44,7 @@ @article{LLM_Watermark_Spoofing_NeurIPS_2024
title={Bileve: Securing Text Provenance in Large Language Models Against Spoofing with Bi-level Signature},
author={Tong Zhou and Xuandong Zhao and Xiaolin Xu and Shaolei Ren},
abstract={Text watermarks for large language models (LLMs) have been commonly used to identify the origins of machine-generated content, which is promising for assessing liability when combating deepfake or harmful content. While existing watermarking techniques typically prioritize robustness against removal attacks, unfortunately, they are vulnerable to spoofing attacks: malicious actors can subtly alter the meanings of LLM-generated responses or even forge harmful content, potentially misattributing blame to the LLM developer. To overcome this, we introduce a bi-level signature scheme, Bileve, which embeds fine-grained signature bits for integrity checks (mitigating spoofing attacks) as well as a coarse-grained signal to trace text sources when the signature is invalid (enhancing detectability) via a novel rank-based sampling strategy. Compared to conventional watermark detectors that only output binary results, Bileve can differentiate 5 scenarios during detection, reliably tracing text provenance and regulating LLMs. The experiments conducted on OPT-1.3B and LLaMA-7B demonstrate the effectiveness of Bileve in defeating spoofing attacks with enhanced detectability.},
journal={NeurIPS (preprint available and pending final revision)},
journal={NeurIPS},
month = {},
year={2024},
url={https://arxiv.org/abs/2406.01946},
Expand Down Expand Up @@ -105,8 +105,8 @@ @article{OnlineBudgetedMatching_General_NeurIPS_2024
journal={NeurIPS},
month = {},
year={2024},
url={https://shaoleiren.github.io/},
html={https://shaoleiren.github.io/},
url={https://arxiv.org/abs/2411.04204},
html={https://arxiv.org/abs/2411.04204},
bibtex_show = {true},
selected={true},
recenthighlights={true},
Expand All @@ -116,16 +116,16 @@ @article{OnlineBudgetedMatching_General_NeurIPS_2024
}


@article{OnlineBudgetedMatching_General_NeurIPS_2024,
@article{SafePlay_Games_Learning_NeurIPS_2024,
abbr={NeurIPS},
title={Safe Exploitative Play in Stochastic Bayesian Games with Untrusted Type Beliefs},
author={Tongxin Li and Tinashe Handina and Shaolei Ren and Adam Wierman},
abstract={The combination of the Bayesian game and learning has a rich history, with the idea of controlling a single agent in a system composed of multiple agents with unknown behaviors given a set of types, each specifying a possible behavior for the other agents. The idea is to plan an agent's own actions with respect to those types which it believes are most likely to maximize the payoff. However, the type beliefs are often learned from past actions and likely to be incorrect. With this perspective in mind, we consider an agent in a game with type predictions of other components, and investigate the impact of incorrect beliefs to the agent’s payoff. In particular, we formally define a trade-off between risk and opportunity by comparing the payoff obtained against the optimal payoff, which is represented by a gap caused by trusting or distrusting the learned beliefs. Our main results characterize the trade-off by providing upper and lower bounds on the payoff gap for both normal-form and stochastic Bayesian games, with numerical results provided.},
journal={NeurIPS},
month = {},
year={2024},
url={https://shaoleiren.github.io/},
html={https://shaoleiren.github.io/},
url={https://openreview.net/forum?id=QZtJ22aOV4},
html={https://openreview.net/forum?id=QZtJ22aOV4},
bibtex_show = {true},
selected={true},
recenthighlights={false},
Expand Down

0 comments on commit f2553c6

Please sign in to comment.