@article{10.3844/jcssp.2019.1522.1537,
  article_type = {journal},
  author       = {Ahmed, Abdelmoty M. and Alez, Reda Abo and Tharwat, Gamal and Taha, Muhammad and Belgacem, B. and Al Moustafa, Ahmad M. J. and Ghribi, Wade},
  title        = {{Arabic Sign Language} Translator},
  journal      = {Journal of Computer Science},
  volume       = {15},
  number       = {10},
  year         = {2019},
  month        = oct,
  pages        = {1522--1537},
  doi          = {10.3844/jcssp.2019.1522.1537},
  url          = {https://thescipub.com/abstract/jcssp.2019.1522.1537},
  publisher    = {Science Publications},
  abstract     = {Development of systems that can recognize the gestures of Arabic Sign language (ArSL) provides a method for hearing impaired to easily integrate into society. This paper aims to develop a computational structure for an intelligent translator to recognize the isolated dynamic gestures of the ArSL. In our proposed system we build a datasets for ArSL from scratch of, we used 100-sign vocabulary from ArSL, we have applied 1500 video files for these signs. These signs were divided into five types of signs, recognizing a sign language gestures from dynamic gestures could be a difficult analysis issue. This paper solves the problem using gradient based key frame extraction technique. These key frames are useful for splitting continuous language gestures into sequence of signs for removing uninformative frames. After splitting of gestures every sign has been treated as isolated gesture. Then features of pre-processed gestures are extracted using Intensity Histogram by integrating with Gray Level Co-occurrence Matrix (GLCM) features. Experiments are performed on our own ArSL dataset and the matching between the ArSL and Arabic text is tested by Euclidian distance. The evaluation of the proposed system for the automatic recognition and translation for isolated dynamic ArSL gestures has proven to be effective and highly accurate. The experimental results show that the proposed system recognizes signs with a precision of 95.8%.},
}