@article{M40AE9C74,
  title    = "Performance Improvement Method of Convolutional Neural Network Using Agile Activation Function",
  journal  = "The Transactions of the Korea Information Processing Society",
  year     = "2020",
  doi      = "10.3745/KTSDE.2020.9.7.213",
  author   = "Na Young Kong and Young Min Ko and Sun Woo Ko",
  keywords = "Convolutional Neural Network, Agile Activation Function, Backpropagation, Learning",
  abstract = "A convolutional neural network is composed of convolutional layers and fully connected layers, and a nonlinear activation function is applied in each of these layers. The activation function used in a neural network simulates how a neuron transmits information: a signal is passed on when the input exceeds a certain threshold and suppressed otherwise. Because a conventional activation function has no direct relationship with the loss function, the process of finding the optimal solution is slow. To improve this, an agile activation function that generalizes the conventional activation function is proposed. The agile activation function improves the performance of a deep neural network by selecting the optimal agile parameter during learning, using the first derivative of the loss function with respect to the agile parameter in the backpropagation process. On the MNIST classification problem, the agile activation function is shown to outperform conventional activation functions."
}
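
The abstract describes an activation function with a learnable ("agile") parameter that is updated in backpropagation using the first derivative of the loss with respect to that parameter. The following is a minimal illustrative sketch of that general idea, not the authors' method: the abstract does not specify the exact functional form of the agile activation, so a PReLU-style parametric form f(z; a) = z for z >= 0 and a*z otherwise is assumed, and all variable names are hypothetical.

```python
# Sketch only: a parametric activation whose shape parameter "a" is learned
# alongside the weights via dL/da, in the spirit of the agile activation
# function described in the abstract. The PReLU-like form is an assumption.
import numpy as np

rng = np.random.default_rng(0)

# Toy regression data and a single linear layer followed by the parametric activation.
X = rng.normal(size=(64, 3))
y = rng.normal(size=(64, 1))
W = rng.normal(scale=0.1, size=(3, 1))
a = 0.25          # "agile" (shape) parameter, treated like any other trainable weight
lr = 0.01

for step in range(200):
    z = X @ W                              # pre-activation
    h = np.where(z >= 0, z, a * z)         # parametric activation f(z; a)
    loss = np.mean((h - y) ** 2)           # squared-error loss

    # Backpropagation: gradients of the loss w.r.t. W and the activation parameter a.
    dh = 2.0 * (h - y) / y.shape[0]                  # dL/dh
    dz = dh * np.where(z >= 0, 1.0, a)               # dL/dz via df/dz
    dW = X.T @ dz                                    # dL/dW
    da = np.sum(dh * np.where(z >= 0, 0.0, z))       # dL/da: first derivative used to adapt a

    W -= lr * dW
    a -= lr * da                           # the activation adapts to the loss surface

print(f"final loss={loss:.4f}, learned activation parameter a={a:.3f}")
```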