BibTeX format
@inbook{Fan:2023:10.1007/978-3-031-36027-5_34,
author = {Fan, H and Cheng, S and de Nazelle, AJ and Arcucci, R},
doi = {10.1007/978-3-031-36027-5_34},
pages = {430--437},
title = {An Efficient ViT-Based Spatial Interpolation Learner for Field Reconstruction},
url = {http://dx.doi.org/10.1007/978-3-031-36027-5_34},
year = {2023}
}
RIS format (EndNote, RefMan)
TY - CHAP
AB - In large-scale field reconstruction, Kriging has been a commonly used technique for spatial interpolation at unobserved locations. However, Kriging's effectiveness is often limited for non-Gaussian or non-stationary real-world fields, and it can be computationally expensive. Supervised deep learning models can address these limitations by capturing the underlying patterns between observations and the corresponding fields. In this study, we introduce ViTAE, a novel deep learning model that combines vision transformers and autoencoders and is designed specifically for large-scale and complex field reconstruction. Experimental results demonstrate the superiority of ViTAE over Kriging; moreover, ViTAE runs more than 1000 times faster than Kriging, enabling real-time field reconstruction.
AU - Fan, H
AU - Cheng, S
AU - de Nazelle, AJ
AU - Arcucci, R
DO - 10.1007/978-3-031-36027-5_34
EP - 437
PY - 2023///
SP - 430
TI - An Efficient ViT-Based Spatial Interpolation Learner for Field Reconstruction
UR - http://dx.doi.org/10.1007/978-3-031-36027-5_34
ER -
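
The record above describes ViTAE only at a high level: a vision-transformer encoder paired with an autoencoder-style decoder that reconstructs a full field from sparse point observations. As a rough illustration of that idea, here is a minimal PyTorch sketch. The patch size, embedding width, depth, the two-channel masked-input encoding, and the class name ViTFieldReconstructor are all assumptions made for illustration, not the paper's actual configuration.

# Minimal, illustrative sketch of a ViT-based field-reconstruction model.
# Hypothetical configuration: hyperparameters and the masked-input encoding
# are assumptions, not taken from the paper.
import torch
import torch.nn as nn

class ViTFieldReconstructor(nn.Module):
    def __init__(self, field_size=64, patch_size=8, embed_dim=128,
                 depth=4, num_heads=4):
        super().__init__()
        self.field_size = field_size
        self.patch_size = patch_size
        n_patches = (field_size // patch_size) ** 2
        # Two input channels: sparse observations and a 0/1 observation mask.
        self.patch_embed = nn.Conv2d(2, embed_dim, kernel_size=patch_size,
                                     stride=patch_size)
        self.pos_embed = nn.Parameter(torch.zeros(1, n_patches, embed_dim))
        encoder_layer = nn.TransformerEncoderLayer(
            d_model=embed_dim, nhead=num_heads,
            dim_feedforward=4 * embed_dim, batch_first=True)
        self.encoder = nn.TransformerEncoder(encoder_layer, num_layers=depth)
        # Lightweight decoder: map each patch token back to patch_size**2 values.
        self.decoder = nn.Linear(embed_dim, patch_size ** 2)

    def forward(self, obs, mask):
        # obs, mask: (B, 1, H, W); unobserved points are zeros in obs.
        x = torch.cat([obs, mask], dim=1)           # (B, 2, H, W)
        tokens = self.patch_embed(x)                # (B, D, H/p, W/p)
        tokens = tokens.flatten(2).transpose(1, 2)  # (B, N, D)
        tokens = self.encoder(tokens + self.pos_embed)
        patches = self.decoder(tokens)              # (B, N, p*p)
        # Reassemble per-patch predictions into the full field.
        b = patches.shape[0]
        g = self.field_size // self.patch_size
        p = self.patch_size
        patches = patches.view(b, g, g, p, p)
        field = patches.permute(0, 1, 3, 2, 4).reshape(
            b, 1, self.field_size, self.field_size)
        return field

if __name__ == "__main__":
    model = ViTFieldReconstructor()
    obs = torch.zeros(2, 1, 64, 64)
    mask = torch.zeros(2, 1, 64, 64)
    # Simulate 100 random observation points per sample.
    idx = torch.randint(0, 64, (2, 100, 2))
    for b in range(2):
        mask[b, 0, idx[b, :, 0], idx[b, :, 1]] = 1.0
    obs = mask * torch.randn(2, 1, 64, 64)
    recon = model(obs, mask)
    print(recon.shape)  # torch.Size([2, 1, 64, 64])

Training such a model along the lines the abstract suggests would presumably pair randomly masked fields with their fully observed counterparts and minimize a reconstruction loss (e.g., MSE over the full field); the speed advantage over Kriging reported in the abstract comes from replacing per-query interpolation with a single forward pass.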