BibTeX format

@article{zhang2023visualtactile,
  author  = {Zhang, Fan and Demiris, Yiannis},
  title   = {Visual-Tactile Learning of Garment Unfolding for Robot-Assisted Dressing},
  journal = {IEEE Robotics and Automation Letters},
  volume  = {8},
  pages   = {5512--5519},
  year    = {2023},
  issn    = {2377-3766},
  doi     = {10.1109/lra.2023.3296371},
}

RIS format (EndNote, RefMan)

TY - JOUR
AB - Assistive robots have the potential to support disabled and elderly people in daily dressing activities. An intermediate stage of dressing is to manipulate the garment from a crumpled initial state to an unfolded configuration that facilitates robust dressing. Applying quasi-static grasping actions with vision feedback on garment unfolding usually suffers from occluded grasping points. In this work, we propose a dynamic manipulation strategy: tracing the garment edge until the hidden corner is revealed. We introduce a model-based approach, where a deep visual-tactile predictive model iteratively learns to perform servoing from raw sensor data. The predictive model is formalized as Conditional Variational Autoencoder with contrastive optimization, which jointly learns underlying visual-tactile latent representations, a latent garment dynamics model, and future predictions of garment states. Two cost functions are explored: the visual cost, defined by garment corner positions, guarantees the gripper to move towards the corner, while the tactile cost, defined by garment edge poses, prevents the garment from falling from the gripper. The experimental results demonstrate the improvement of our contrastive visual-tactile model predictive control over single sensing modality and baseline model learning techniques. The proposed method enables a robot to unfold back-opening hospital gowns and perform upper-body dressing.
AU - Zhang, F.
AU - Demiris, Y.
DO - 10.1109/lra.2023.3296371
EP - 5519
PY - 2023///
SN - 2377-3766
SP - 5512
TI - Visual-tactile learning of garment unfolding for robot-assisted dressing
T2 - IEEE Robotics and Automation Letters
VL - 8
ER -