@InProceedings{Sreepathi_Gateways2020_20201019,
  author    = {Sarat Sreepathi and Min Xu and Nathan Collier and Jitendra Kumar and Jiafu Mao and Forrest M. Hoffman},
  title     = {Land Model Testbed: Accelerating Development, Benchmarking and Analysis of Land Surface Models},
  booktitle = {Proceedings of the Gateways 2020 Conference},
  publisher = {Open Science Framework},
  doi       = {10.17605/OSF.IO/X32A8},
  day       = 19,
  month     = oct,
  year      = 2020,
  abstract  = {A Land Model Testbed (LMT), designed to provide a computational framework for systematically assessing model fidelity and supporting rapid development of complex multiscale models, offers a general-purpose workflow for conducting large ensemble simulations of multiple land surface models, post-processing large volumes of model output, and evaluating model results. It leverages existing tools for launching model simulations and the International Land Model Benchmarking (ILAMB) package for assessing model fidelity through comparison with best-available observational datasets. Increased complexity and the proliferation of uncertain parameters in process representations in land surface models have driven the need for frequent and intensive testing and evaluation of models to quantify uncertainties and optimize parameters such that results are consistent with observations. The LMT described here meets these needs by providing tools to run thousands of ensemble simulations simultaneously and post-process their output files, by automating execution of an enhanced version of ILAMB with site-specific benchmarks and multivariate functional relationships, and by offering ensemble diagnostics and a customizable dashboard for displaying model performance metrics and associated graphics. We envision the LMT capabilities will serve as a foundational computational resource for a proposed user facility focused on terrestrial multiscale model--data integration.}
}