@inproceedings{db6a5f7370294792b648180b490a1daa,
title = "Stochastic Large-scale Machine Learning Algorithms with Distributed Features and Observations",
abstract = "As the size of modern datasets exceeds the disk and memory capacities of a single computer, machine learning practitioners have resorted to parallel and distributed computing. Given that optimization is one of the pillars of machine learning and predictive modeling, distributed optimization methods have recently garnered ample attention, in particular when either observations or features are distributed, but not both. We propose a general stochastic algorithm where observations, features, and gradient components can be sampled in a double distributed setting, i.e., with both features and observations distributed. Very technical analyses establish convergence properties of the algorithm under different conditions on the learning rate (diminishing to zero or constant). Computational experiments in Spark demonstrate a superior performance of our algorithm versus a benchmark in early iterations of the algorithm, which is due to the stochastic components of the algorithm.",
keywords = "Convexity, Large Scale, Machine Learning, Optimization, Stochasticity",
author = "Biyi Fang and Diego Klabjan and Truong Vo",
note = "Publisher Copyright: {\textcopyright} 2024 IEEE.; 2024 IEEE International Conference on Big Data, BigData 2024 ; Conference date: 15-12-2024 Through 18-12-2024",
year = "2024",
doi = "10.1109/BigData62323.2024.10825730",
language = "English (US)",
series = "Proceedings - 2024 IEEE International Conference on Big Data, BigData 2024",
publisher = "Institute of Electrical and Electronics Engineers Inc.",
pages = "209--242",
editor = "Wei Ding and Chang-Tien Lu and Fusheng Wang and Liping Di and Kesheng Wu and Jun Huan and Raghu Nambiar and Jundong Li and Filip Ilievski and Ricardo Baeza-Yates and Xiaohua Hu",
booktitle = "Proceedings - 2024 IEEE International Conference on Big Data, BigData 2024",
address = "United States",
}