@inproceedings{9792ccab394947fc87c6f34b5d92dce8,
  title     = {{PALOR}: Poisoning Attacks against Logistic Regression},
  abstract  = {With Google, Amazon, Microsoft, and other entities establishing ``Machine Learning as a Service'' (MLaaS), ensuring the security of the resulting machine learning models has become an increasingly important topic. The security community has demonstrated that MLaaS contains many potential security risks, with new risks constantly being discovered. In this paper, we focus on one of these security risks -- data poisoning attacks. Specifically, we analyze how attackers interfere with the results of logistic regression by poisoning the training datasets. To this end, we analyze and propose an alternative formulation for the optimization of poisoning training points capable of poisoning the logistic regression classifier, a model that has previously not been susceptible to poisoning attacks. We evaluate the performance of our proposed attack algorithm on the three real-world datasets of wine cultivars, adult census information, and breast cancer diagnostics. The success of our proposed formulation is evident in decreasing testing accuracy of logistic regression models exposed to an increasing number of poisoned training samples.},
  keywords  = {Data poisoning, Logistic regression, Machine learning},
  author    = {Wen, Jialin and Zhao, Benjamin Zi Hao and Xue, Minhui and Qian, Haifeng},
  year      = {2020},
  doi       = {10.1007/978-3-030-55304-3_23},
  language  = {English},
  isbn      = {9783030553036},
  series    = {Lecture Notes in Computer Science},
  publisher = {Springer},
  pages     = {447--460},
  editor    = {Liu, Joseph K. and Cui, Hui},
  booktitle = {Information Security and Privacy},
  address   = {Cham},
  note      = {25th Australasian Conference on Information Security and Privacy, ACISP 2020 ; Conference date: 30-11-2020 Through 02-12-2020},
}