@article{10.3844/jcssp.2025.177.183,
  article_type = {journal},
  title = {Kenyan Sign Language Translation Using SSD MobileNet-v2 FPNlite Model},
  author = {Muriithi, Henry Muchiri and Wanjala, Geoffrey Kasembeli},
  volume = {21},
  number = {1},
  year = {2024},
  month = {Dec},
  pages = {177-183},
  doi = {10.3844/jcssp.2025.177.183},
  url = {https://thescipub.com/abstract/jcssp.2025.177.183},
  abstract = {Speech impairment is a disability that affects an individual's ability to communicate effectively through speech and hearing. Those affected often rely on alternative forms of communication, such as sign language. While sign language has become increasingly widespread in recent years, a significant challenge persists for non-sign-language users, particularly in Kenya, where effective communication with sign language users remains a barrier. The disability therefore creates inequality, as affected people are denied equal opportunities. Technology has been adopted to bridge this gap and to assist in achieving UN Sustainable Development Goal 10, which strives to reduce inequality. Recent advancements in deep learning and computer vision have led to significant progress in motion and gesture recognition. However, little work has been done specifically on translating Kenyan Sign Language; most existing solutions focus on sign languages from developed countries. The focus of this study, therefore, is to create a vision-based application that translates Kenyan Sign Language to text, thus aiding communication between signers and non-signers. The model, developed using SSD MobileNet V2 FPNlite, achieved an accuracy of 85% after 20,000 training steps over 40 epochs.},
  journal = {Journal of Computer Science},
  publisher = {Science Publications}
}