@inproceedings{94d215be1c25492ea809359ca4d5ce9e,
title = "Pre-loaded Deep-Q Learning",
abstract = "This paper explores the potentiality of pre-loading deep-Q learning agents{\textquoteright} replay memory buffers with experiences generated by preceding agents, so as to bolster their initial performance. The research illustrates that this pre-loading of previously generated experience replays does indeed improve the initial performance of new agents, provided that an appropriate degree of ostensibly undesirable activity was expressed in the preceding agent{\textquoteright}s behaviour.",
keywords = "Deep-Q learning, Experience replay, Neural networks, Q-learning, Reinforcement learning",
author = "Tristan Falck and Elize Ehlers",
note = "Publisher Copyright: {\textcopyright} 2022, IFIP International Federation for Information Processing.; 12th IFIP TC 12 International Conference on Intelligent Information Processing, IIP 2022 ; Conference date: 27-05-2022 Through 30-05-2022",
year = "2022",
doi = "10.1007/978-3-031-03948-5_14",
language = "English",
isbn = "9783031039478",
series = "IFIP Advances in Information and Communication Technology",
publisher = "Springer Science and Business Media Deutschland GmbH",
pages = "159--172",
editor = "Zhongzhi Shi and Jean-Daniel Zucker and Bo An",
booktitle = "Intelligent Information Processing XI - 12th IFIP TC 12 International Conference, IIP 2022, Proceedings",
address = "Germany",
}