@inproceedings{gill2025tracefl,
  title        = {{TraceFL: Interpretability-Driven Debugging in Federated Learning via Neuron Provenance}},
  author       = {Gill, Waris and Anwar, Ali and Gulzar, Muhammad Ali},
  booktitle    = {2025 IEEE/ACM 47th International Conference on Software Engineering (ICSE)},
  year         = {2025},
  organization = {IEEE},
}
2024
MeanCache: User-Centric Semantic Cache for Large Language Model Based Web Services
Waris Gill, Mohamed Elidrisi, Pallavi Kalapatapu, and 2 more authors
arXiv preprint arXiv:2403.02694, 2024
@article{gill2024privacy,
  title   = {{MeanCache: User-Centric Semantic Cache for Large Language Model Based Web Services}},
  author  = {Gill, Waris and Elidrisi, Mohamed and Kalapatapu, Pallavi and Anwar, Ali and Gulzar, Muhammad Ali},
  journal = {arXiv preprint arXiv:2403.02694},
  year    = {2024},
}
2023
FedDebug: Systematic Debugging for Federated Learning Applications
Waris Gill, Ali Anwar, and Muhammad Ali Gulzar
In 2023 IEEE/ACM 45th International Conference on Software Engineering (ICSE), 2023
In Federated Learning (FL), clients independently train local models and share them with a central aggregator to build a global model. The inability to access clients’ data and the collaborative nature of training make FL appealing for applications with data-privacy concerns, such as medical imaging. However, these FL characteristics pose unprecedented challenges for debugging. When a global model’s performance deteriorates, identifying the responsible rounds and clients is a major pain point. Developers resort to trial-and-error debugging with subsets of clients, hoping to increase the global model’s accuracy or let future FL rounds retune the model, both of which are time-consuming and costly. We design a systematic fault localization framework, FedDebug, that advances FL debugging on two novel fronts. First, FedDebug enables interactive debugging of real-time collaborative training in FL by leveraging record-and-replay techniques to construct a simulation that mirrors live FL. FedDebug’s breakpoint can help inspect an FL state (round, client, and global model) and move between rounds and clients’ models seamlessly, enabling fine-grained step-by-step inspection. Second, FedDebug automatically identifies the client(s) responsible for lowering the global model’s performance without any testing data or labels, both of which are essential for existing debugging techniques. FedDebug’s strengths come from adapting differential testing in conjunction with neuron activations to determine the client(s) deviating from normal behavior. FedDebug achieves 100% accuracy in finding a single faulty client and 90.3% accuracy in finding multiple faulty clients. FedDebug’s interactive debugging incurs 1.2% overhead during training, while it localizes a faulty client in only 2.1% of a round’s training time. With FedDebug, we bring effective debugging practices to federated learning, improving the quality and productivity of FL application developers.
@inproceedings{gill2023FedDebug,
  author    = {Gill, Waris and Anwar, Ali and Gulzar, Muhammad Ali},
  title     = {{FedDebug: Systematic Debugging for Federated Learning Applications}},
  booktitle = {2023 IEEE/ACM 45th International Conference on Software Engineering (ICSE)},
  year      = {2023},
  pages     = {512--523},
  issn      = {1558-1225},
}
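To make the differential-testing idea in the abstract above concrete, here is a minimal sketch of how activation fingerprints from clients’ models could be compared on random, label-free inputs to flag the most deviating client. The PyTorch code, the helper names (activation_fingerprint, localize_faulty_client), the ReLU-only hook placement, and the median-distance heuristic are illustrative assumptions for this sketch, not FedDebug’s actual implementation.

```python
import torch
import torch.nn as nn

def activation_fingerprint(model, inputs):
    """Collect the mean activation of every ReLU layer into one vector."""
    acts, hooks = [], []

    def hook(_module, _inputs, output):
        acts.append(output.detach().flatten(1).mean(dim=0))

    for layer in model.modules():
        if isinstance(layer, nn.ReLU):
            hooks.append(layer.register_forward_hook(hook))
    model.eval()
    with torch.no_grad():
        model(inputs)
    for h in hooks:
        h.remove()
    return torch.cat(acts)

def localize_faulty_client(client_models, input_shape=(64, 1, 28, 28)):
    """Flag the client whose fingerprint deviates most from the
    per-neuron median across all clients (no test data or labels)."""
    inputs = torch.rand(input_shape)  # random, unlabeled probe inputs
    # assumes all clients share one architecture, as in FL
    fps = torch.stack([activation_fingerprint(m, inputs) for m in client_models])
    median = fps.median(dim=0).values      # consensus "normal" behavior
    dists = (fps - median).norm(dim=1)     # each client's deviation
    return int(dists.argmax())
```

Because the comparison needs only random inputs and the clients’ models themselves, no test data or labels are required, matching the setting the abstract describes.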
FedDefender: Backdoor Attack Defense in Federated Learning
Waris Gill, Ali Anwar, and Muhammad Ali Gulzar
In Proceedings of the 1st International Workshop on Dependability and Trustworthiness of Safety-Critical Systems with Machine Learned Components, San Francisco, CA, USA, 2023
Federated Learning (FL) is a privacy-preserving distributed machine learning technique that enables individual clients (e.g., user participants, edge devices, or organizations) to train a model on their local data in a secure environment and then share the trained model with an aggregator to build a global model collaboratively. In this work, we propose FedDefender, a defense mechanism against targeted poisoning attacks in FL by leveraging differential testing. FedDefender first applies differential testing on clients’ models using a synthetic input. Instead of comparing the output (predicted label), which is unavailable for synthetic input, FedDefender fingerprints the neuron activations of clients’ models to identify a potentially malicious client containing a backdoor. We evaluate FedDefender using MNIST and FashionMNIST datasets with 20 and 30 clients, and our results demonstrate that FedDefender effectively mitigates such attacks, reducing the attack success rate (ASR) to 10% without deteriorating the global model performance.
@inproceedings{gill2023FedDefender,
  author    = {Gill, Waris and Anwar, Ali and Gulzar, Muhammad Ali},
  title     = {{FedDefender: Backdoor Attack Defense in Federated Learning}},
  booktitle = {Proceedings of the 1st International Workshop on Dependability and Trustworthiness of Safety-Critical Systems with Machine Learned Components},
  series    = {SE4SafeML 2023},
  year      = {2023},
  isbn      = {9798400703799},
  publisher = {Association for Computing Machinery},
  address   = {New York, NY, USA},
  location  = {San Francisco, CA, USA},
  pages     = {6--9},
  numpages  = {4},
  keywords  = {fault localization, testing, differential testing, poisoning attack, federated learning, backdoor attack, deep learning},
}
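As a rough illustration of the fingerprinting step the FedDefender abstract describes, the sketch below compares which neurons fire in each client’s model on a single synthetic input and flags the client whose firing pattern agrees least with the rest. The function names, the ReLU-only hooks, and the pairwise-agreement metric are hypothetical choices for this sketch, not the paper’s exact method.

```python
import torch
import torch.nn as nn

def neuron_fingerprint(model, synthetic_input):
    """Record a binary on/off firing pattern of ReLU neurons on one input."""
    fired, hooks = [], []

    def hook(_module, _inputs, output):
        fired.append((output.detach() > 0).float().flatten())

    for layer in model.modules():
        if isinstance(layer, nn.ReLU):
            hooks.append(layer.register_forward_hook(hook))
    model.eval()
    with torch.no_grad():
        model(synthetic_input)
    for h in hooks:
        h.remove()
    return torch.cat(fired)

def suspect_backdoored_client(client_models, input_shape=(1, 1, 28, 28)):
    """Differential test: flag the client whose firing pattern
    agrees least with every other client's pattern."""
    x = torch.rand(input_shape)  # synthetic input; no real data or labels
    fps = torch.stack([neuron_fingerprint(m, x) for m in client_models])
    n = fps.shape[0]
    # pairwise agreement: fraction of neurons with the same on/off state
    agree = (fps.unsqueeze(0) == fps.unsqueeze(1)).float().mean(dim=2)
    avg_agree = (agree.sum(dim=1) - 1.0) / (n - 1)  # drop self-agreement of 1
    return int(avg_agree.argmin())
```

Comparing firing patterns rather than predicted labels mirrors the abstract’s point that output labels are unavailable for a synthetic input, so the neurons themselves serve as the comparison signal.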