@comment{generated from alshedivat/al-folio}
---
---
@unpublished{lei2025TROVE,
  title         = {{TROVE}: A Travel Mode Identification Framework via Contrastive Fusion of Multi-view Trajectory Representations},
  author        = {Lei, Yutian and Guan, Xuefeng and Wu, HuaYi},
  year          = {2025},
  note          = {Manuscript},
  langid        = {english},
  selected      = {true},
  abbr          = {Manuscript},
  preview       = {traj_visualize.png},
  internal-note = {NOTE(review): abstract refers to the framework as MVCF-TMI while the title says TROVE -- confirm which name is current},
  abstract      = {Travel Mode Identification (TMI) has emerged as a crucial research topic in intelligent transportation. Accurate TMI automatically identifies travel modes from large-scale GPS trajectory datasets, which is essential for traffic management and travel planning. Conventional methods have improved accuracy by fusing spatial patterns and kinematic attributes at the input level. However, feature-level fusion approaches, which independently extract high-level trajectory features and align these complementary representations for cross-view semantic comprehension, remain underexplored. To address this gap, we propose MVCF-TMI, a novel framework that enhances TMI accuracy and model generalizability by aligning the kinematic and spatial views of GPS data through Multi-View Contrastive learning. Specifically, our framework exploits comprehensive spatial and kinematic characteristics leveraging a multi-view paradigm. Then, to enable better cross-view semantic understanding, we engage an inter-view contrastive loss function, optimizing feature alignment toward more effective view-specific representations within a shared subspace. This technique facilitates feature interaction, enhances feature consistency, and captures complementary information across distinct multi-view trajectory representations. Extensive experiments demonstrate that MVCF-TMI outperforms baseline approaches with a competitive accuracy of 86.45% on the large-scale dataset Geolife, demonstrating our method's ability to learn more discriminative and robust representations for the TMI task. The robust generalization capability of the proposed method was also observed. The MVCF-TMI model, pre-trained using self-supervised and supervised paradigms on the large-scale dataset, was transferred to low-data scenario on the SHL dataset, and demonstrated improved accuracy and robustness in this challenging setting.},
}