@article{M6F3EFC03,
  title    = {{PEFT} Methods for Domain Adaptation},
  author   = {Lee, You Jin and Yoon, Kyung Koo and Chung, Woo Dam},
  journal  = {The Transactions of the Korea Information Processing Society},
  year     = {2025},
  doi      = {10.3745/TKIPS.2025.14.4.239},
  keywords = {Parameter-Efficient Fine-tuning, Domain Adaptation, Deep Learning, Large Language Model, LoRA, MoRA},
  abstract = {This study analyzed that the biggest obstacle in deploying Large Language Models (LLMs) in industrial settings is incorporating domain specificity into the models. To mitigate this issue, the study compared model performance when domain knowledge was additionally trained using MoRA, which enables learning more knowledge information, and LoRA, which is the most common among various PEFT methods. Along with this, training time was reduced through securing high-quality data and efficient data loading. The findings of this research will provide practical guidelines for developing efficient domain-specific language models with limited computing resources.},
  internal-note = {NOTE(review): surname-first split of the Korean author names (Lee/Yoon/Chung as family names) assumed from publisher listing order -- confirm against the paper; issn="null" export junk removed; bare DOI stored without resolver prefix},
}