Selected Publications
2025

Atamna, A., Maus, T., Kievelitz, F., & Glasmachers, T. (2025). Cumulative Learning Rate Adaptation: Revisiting Path-Based Schedules for SGD and Adam. Retrieved from https://arxiv.org/abs/2508.05408

Maus, T., Atamna, A., & Glasmachers, T. (2025). Balancing Specialization and Centralization: A Multi-Agent Reinforcement Learning Benchmark for Sequential Industrial Control. https://doi.org/10.48550/ARXIV.2510.20408

Maus, T., Atamna, A., & Glasmachers, T. (2025). Leveraging Genetic Algorithms for Efficient Demonstration Generation in Real-World Reinforcement Learning Environments. https://doi.org/10.48550/ARXIV.2507.00762

2024
Pendyala, A., Atamna, A., & Glasmachers, T. (2024). Solving a Real-World Optimization Problem Using Proximal Policy Optimization with Curriculum Learning and Reward Engineering. In Machine Learning and Knowledge Discovery in Databases. Applied Data Science Track (ECML-PKDD) (pp. 150–165). Cham: Springer Nature Switzerland.

Pendyala, A., Dettmer, J., Glasmachers, T., & Atamna, A. (2024). ContainerGym: A Real-World Reinforcement Learning Benchmark for Resource Allocation. In Machine Learning, Optimization, and Data Science (pp. 78–92). Cham: Springer Nature Switzerland.