@inproceedings{eae4a02e48c74c2288f20ed90104b7fa,
  title     = {Invariance Through Latent Alignment},
  author    = {Yoneda, Takuma and Yang, Ge and Walter, Matthew R. and Stadie, Bradly C.},
  booktitle = {Robotics: Science and Systems XVIII},
  series    = {Robotics: Science and Systems},
  editor    = {Hauser, Kris and Shell, Dylan and Huang, Shoudong},
  publisher = {MIT Press Journals},
  address   = {United States},
  year      = {2022},
  doi       = {10.15607/RSS.2022.XVIII.064},
  isbn      = {9780992374785},
  language  = {English (US)},
  abstract  = {A robot{\textquoteright}s deployment environment often involves perceptual changes that differ from what it has experienced during training. Standard practices such as data augmentation attempt to bridge this gap by augmenting source images in an effort to extend the support of the training distribution to better cover what the agent might experience at test time. In many cases, however, it is impossible to know test-time distribution-shift a priori, making these schemes infeasible. In this paper, we introduce a general approach, called Invariance through Latent Alignment (ILA), that improves the test-time performance of a visuomotor control policy in deployment environments with unknown perceptual variations. ILA performs unsupervised adaptation at deployment time by matching the distribution of latent features on the target domain to the agent{\textquoteright}s prior experience, without relying on paired data. Although simple, we show that this idea leads to surprising improvements on a variety of challenging adaptation scenarios, including changes in lighting conditions, the content in the scene, and camera poses. We present results on calibrated control benchmarks in simulation---the distractor control suite---and a physical robot under a sim-to-real setup. Video and code available at: https://takuma.yoneda.xyz/projects/ila},
  note      = {Publisher Copyright: {\textcopyright} 2022, MIT Press Journals. All rights reserved.; 18th Robotics: Science and Systems, RSS 2022 ; Conference date: 27-06-2022},
}