@inproceedings{241ff47b687442559a9edea13b559ff5,
  title         = {Model Based Path Planning Using {Q-Learning}},
  abstract      = {Though the classical robotics is highly proficient in accomplishing a lot of complex tasks, still it is far from exhibiting the human-like natural intelligence in terms of flexibility and reliability to work in dynamic scenarios. In order to render these qualities in the robots, reinforcement learning could prove to be quite effective. By employing learning based training provided by reinforcement learning methods, a robot can be made to learn to work in previously unforeseen situations. Still this learning task can be quite cumbersome due to its requirement of the huge amount of training data which makes the training quite inefficient in the real world scenarios. The paper proposes a model based path planning method using the e greedy based Q-learning. The scenario was modeled using a grid-world based simulator which is being used in the initial training of the agent. The trained policy is then improved to learn the real world dynamics by using the real world samples. This study proves the efficiency and reliability of the simulator-based training methodology.},
  keywords      = {Grid-World, Model Based Control, Neural Network, Q-learning, Reinforcement Learning},
  author        = {Sharma, Avinash and Gupta, Kanika and Kumar, Anirudha and Sharma, Aishwarya and Kumar, Rajesh},
  note          = {Publisher Copyright: {\textcopyright} 2017 {IEEE}.; 2017 {IEEE} International Conference on Industrial Technology, {ICIT} 2017 ; Conference date: 23-03-2017 Through 25-03-2017},
  year          = {2017},
  month         = apr,
  day           = {26},
  doi           = {10.1109/ICIT.2017.7915468},
  language      = {English},
  series        = {Proceedings of the {IEEE} International Conference on Industrial Technology},
  publisher     = {Institute of Electrical and Electronics Engineers Inc.},
  pages         = {837--842},
  booktitle     = {2017 {IEEE} International Conference on Industrial Technology, {ICIT} 2017},
  address       = {United States},
  internal-note = {review: `address` is a country, not the publisher city BibTeX expects -- confirm and replace with the IEEE publisher city if known},
}