@article{ author = {Farhang, Mohsen and Bahramgiri, Hosein and Dehghani, Hami}, title = {Novel Features for Modulation Recognition Using an 8PSK Demodulator}, abstract = {In this paper, a feature-based modulation classification algorithm is developed for discriminating PSK signals. The candidate modulation types are assumed to be QPSK, OQPSK, π/4 DQPSK and 8PSK. The proposed method applies an 8PSK baseband demodulator in order to extract the required features from the observed symbols. The received signal with unknown modulation type is demodulated by an 8PSK demodulator whose output is modeled as a finite-state machine with different states and transitions for each candidate modulation. The estimated probabilities of particular transitions constitute the discriminating features. The obtained features are fed to a Bayesian classifier, which decides on the modulation type of the received signal. The probability of correct classification is computed for different numbers of observed symbols and SNR conditions by carrying out several simulations. The results show that the proposed method offers more accurate classification than previous methods for classifying variants of QPSK.}, Keywords = {automatic modulation classification, feature extraction, pattern recognition, variants of QPSK, Bayes classifier}, volume = {13}, Number = {2}, pages = {3-10}, publisher = {Research Center on Developing Advanced Technologies}, url = {http://jsdp.rcisp.ac.ir/article-1-70-en.html}, eprint = {http://jsdp.rcisp.ac.ir/article-1-70-en.pdf}, journal = {Signal and Data Processing}, issn = {2538-4201}, eissn = {2538-421X}, year = {2016} } @article{ author = {Moradi, Ali and Shahbahrami, Asadollah and EbrahimiAtani, Reza and AlidoustNia, Mehr}, title = {Persian XML Documents Metaheuristic Clustering Based on Structure and Content Similarity}, abstract = {Due to the increasing number of XML documents, organizing these documents effectively in order to retrieve useful information from them is essential. A possible solution is to cluster XML documents in order to discover knowledge. A key issue in clustering XML documents is how to measure the similarity between them. Conventional text-document clustering relies on similarity measures based only on content, so the structural information contained in XML documents is ignored. In this paper, a new model, named the matrix space model, is proposed to represent both the structural and the content features of XML documents. Based on this model, a Jaccard similarity measure is defined and the colonial competitive algorithm is used for clustering XML documents. Experimental results show that the proposed model is effective in identifying documents with closely similar structure and content.
This method can improve clustering accuracy and increase the productivity of working with XML data.}, Keywords = {Clustering, Persian, colonial competitive algorithm}, volume = {13}, Number = {2}, pages = {11-23}, publisher = {Research Center on Developing Advanced Technologies}, url = {http://jsdp.rcisp.ac.ir/article-1-29-en.html}, eprint = {http://jsdp.rcisp.ac.ir/article-1-29-en.pdf}, journal = {Signal and Data Processing}, issn = {2538-4201}, eissn = {2538-421X}, year = {2016} } @article{ author = {khalilzadeh, mohammad ali and dustdarnughabi, hojjat}, title = {Evaluation of blood perfusion of the trapezius muscle with wavelet analysis of photoplethysmogram signal using neural network}, abstract = {Measurement of tissue blood perfusion has many applications in the prevention of pressure sores, the assessment of muscle activity and the monitoring of tissue blood perfusion during surgery. Photoplethysmography is accepted by researchers as a continuous measure for evaluating blood perfusion in tissue. In this study, a new method for assessing tissue blood perfusion based on the photoplethysmogram (PPG) signal is presented. The PPG wavelength was near-infrared 950 nm, with source-to-detector separations of 7 and 22 mm. The probe was placed over the trapezius muscle of 19 healthy subjects under external pressures of 0, 40 and 80 mmHg. The PPG envelope was detected and its wavelet transform was calculated in five frequency intervals. These bands relate to metabolic, neurogenic, myogenic, respiratory and cardiac activities. The p-value of the t-test analysis for the extracted features was less than 0.005. The results show that, when external pressure is applied, the deep tissue layers are affected the most and their blood perfusion is reduced. The accuracy of separating the different pressures was 73.68% for the back-propagation neural network (BPNN) and 79.6% for the generalized regression neural network (GRNN). With further improvement, this method could serve as a clinical assessment of tissue blood perfusion and an effective tool for the prevention of pressure ulcers.}, Keywords = {Blood perfusion, Photoplethysmogram, Wavelet transform, generalized regression neural network (GRNN)}, volume = {13}, Number = {2}, pages = {25-33}, publisher = {Research Center on Developing Advanced Technologies}, url = {http://jsdp.rcisp.ac.ir/article-1-253-en.html}, eprint = {http://jsdp.rcisp.ac.ir/article-1-253-en.pdf}, journal = {Signal and Data Processing}, issn = {2538-4201}, eissn = {2538-421X}, year = {2016} } @article{ author = {Noferesti, Samira and Shamsfard, Mehrnoush}, title = {Automatic building a corpus and exploiting it for polarity classification of indirect opinions about drugs}, abstract = {Opinion mining is a well-known problem in natural language processing that has attracted increasing attention in recent years. Existing approaches have often focused on identifying direct opinions and have ignored indirect ones. However, in some domains, such as the medical domain, indirect opinions occur frequently. Therefore, ignoring indirect opinions can lead to the loss of valuable information and a noticeable decline in the overall accuracy of opinion mining systems. In this paper, we present a semi-automatic approach to constructing a corpus of indirect opinions from drug reviews. In the first step, we propose an automatic method for detecting indirect opinions, and in the second step, we use domain knowledge, linguistic rules and review structure for polarity detection of drug reviews.
Then we exploit the constructed corpus as a training set for machine learning techniques that classify the polarity of new examples. Experimental results demonstrate that our proposed approach achieves a precision of 82.81 percent.}, Keywords = {opinion mining, indirect opinions, sentiment analysis, corpus construction, machine learning}, volume = {13}, Number = {2}, pages = {35-49}, publisher = {Research Center on Developing Advanced Technologies}, url = {http://jsdp.rcisp.ac.ir/article-1-299-en.html}, eprint = {http://jsdp.rcisp.ac.ir/article-1-299-en.pdf}, journal = {Signal and Data Processing}, issn = {2538-4201}, eissn = {2538-421X}, year = {2016} } @article{ author = {hamidi, hodjat}, title = {An Approach to Protecting Data Processing Systems in Computing Systems Using Convolutional Codes}, abstract = {We present a framework for algorithm-based fault tolerance (ABFT) methods in the design of fault-tolerant computing systems. The ABFT error detection technique relies on the comparison of parity values computed in two ways. The parallel processing of input parity values produces output parity values comparable with the parity values regenerated from the original processed outputs. Numerical data processing errors are detected by comparing parity values associated with a convolutional code. This article proposes a new computing paradigm to provide fault tolerance for numerical algorithms. The data processing system is protected through parity values defined by a high-rate real convolutional code. Parity comparisons provide error detection, while output data correction is effected by a decoding method that accounts for both round-off errors and computer-induced errors. To use ABFT methods efficiently, a systematic form is desirable. A class of burst-correcting convolutional codes is investigated. The purpose is to describe new protection techniques that are easily combined with data processing methods, leading to more effective fault tolerance.}, Keywords = {algorithm-based fault tolerance (ABFT), convolutional codes, parity values, syndrome}, volume = {13}, Number = {2}, pages = {51-69}, publisher = {Research Center on Developing Advanced Technologies}, url = {http://jsdp.rcisp.ac.ir/article-1-349-en.html}, eprint = {http://jsdp.rcisp.ac.ir/article-1-349-en.pdf}, journal = {Signal and Data Processing}, issn = {2538-4201}, eissn = {2538-421X}, year = {2016} } @article{ author = {Asgharian, Lida and ebrahimnezhad, Hossei}, title = {Animating Cartoon Characters by Skeleton-Based Articular Motion Transferring of Other Objects}, abstract = {Nowadays, animators bring fanciful characters to life by giving natural movements to the body parts of cartoon characters. To achieve this goal, the movements of living beings can be applied to cartoon characters. In this paper, a method based on skeletal correspondence finding is proposed to transfer the movement of a 2D character to a new character, where the two shapes have approximately the same structural topology. Based on the given animation sequence of the source character, each body part of this character is segmented according to its specific motion. In this way, an exact skeleton with defined joints is obtained for the source shape. The target character skeleton is obtained by automatic skeleton extraction algorithms. At this stage, by finding the skeletal correspondence between the source and target characters, we can transfer the skeleton deformation of each source body part to the corresponding target body part.
This deformation contains the skew, scale and orientation values obtained from the reference pose and the deformed poses of the source skeletons. Finally, to evaluate the efficiency of the proposed method, we apply it to 2D animation characters. The results illustrate the ability of the algorithm to generate correct and natural motions for a variety of characters. The method is robust to the character type and can transfer a variety of deformations.}, Keywords = {Motion retargeting, Animation, Skeleton correspondence, Joint extraction}, volume = {13}, Number = {2}, pages = {71-89}, publisher = {Research Center on Developing Advanced Technologies}, url = {http://jsdp.rcisp.ac.ir/article-1-312-en.html}, eprint = {http://jsdp.rcisp.ac.ir/article-1-312-en.pdf}, journal = {Signal and Data Processing}, issn = {2538-4201}, eissn = {2538-421X}, year = {2016} } @article{ author = {sharifnoughabi, mojtaba and marvi, hossein and darabian, danial}, title = {Farsi Accent Recognition Based on Speech Signal Using Efficient Feature Extraction and Combination of Classifiers}, abstract = {Speech recognition has achieved great improvements recently. However, robustness is still one of the big problems; for example, recognition performance fluctuates sharply depending on the speaker, especially when the speaker has a strong accent, and different accents dramatically decrease the accuracy of an ASR system. In this paper, we apply three new feature extraction methods, namely Spectral Centroid Magnitude (SCM), its first-order difference (∆SCM) and the Zak transform, to speech signals with accents selected from the FARSDAT corpus, and compare their performance with common methods such as MFCC. Moreover, a new feature based on the MFCC algorithm is proposed for use in noisy environments. Five different classifiers (MLP, KNN, PNN, RBF and SVM) and their combination are used to evaluate the performance of each feature extraction method. Experimental results demonstrate improved recognition rates with the proposed method.}, Keywords = {Spectral Centroid Magnitude, classifiers combination, Farsi accents, support vector machine, Improved Mel Frequency Cepstral Coefficient}, volume = {13}, Number = {2}, pages = {91-103}, publisher = {Research Center on Developing Advanced Technologies}, url = {http://jsdp.rcisp.ac.ir/article-1-315-en.html}, eprint = {http://jsdp.rcisp.ac.ir/article-1-315-en.pdf}, journal = {Signal and Data Processing}, issn = {2538-4201}, eissn = {2538-421X}, year = {2016} } @article{ author = {Asadi, Sekine}, title = {Providing a method for image preprocessing to improve the performance of JPEG}, abstract = {A great deal of research has been performed on image compression, and different methods have been proposed. Each of the existing methods achieves different compression rates on various images. By identifying the parameters that affect a compression algorithm and strengthening them in a preprocessing stage, the compression rate of the algorithm can be improved. JPEG is one of the successful compression algorithms, and various works have been done to improve its performance. Image contrast is one important factor affecting the JPEG compression rate. The lower the image contrast, the less detail is visible, and JPEG can compress such images at a higher rate.
In this paper, a semi-lossless preprocessing method based on a power operator is proposed that decreases the image contrast by reducing the range of gray levels in the image. Therefore, JPEG can compress the preprocessed images with a higher compression ratio. To restore the image after decoding the compressed image, the power operator with the inverse exponent is applied to it, yielding an image similar to the original. The results show that the proposed preprocessing method substantially increases the JPEG compression ratio on natural images.}, Keywords = {Preprocessing, Image compression, Semi-lossless, JPEG method}, volume = {13}, Number = {2}, pages = {105-120}, publisher = {Research Center on Developing Advanced Technologies}, url = {http://jsdp.rcisp.ac.ir/article-1-320-en.html}, eprint = {http://jsdp.rcisp.ac.ir/article-1-320-en.pdf}, journal = {Signal and Data Processing}, issn = {2538-4201}, eissn = {2538-421X}, year = {2016} } @article{ author = {Dehghan, Mohammad Hossein and Faili, Heshaam}, title = {Generating the Persian Constituency Treebank in an Automatic Converting Method}, abstract = {Treebanks are important and useful resources for Natural Language Processing tasks. Dependency and phrase structures are two well-known kinds of treebanks. Many efforts have already been made to convert dependency structure to phrase structure. In this paper, we study an approach to converting dependency structure to phrase structure, motivated by the lack of a large phrase-structure treebank for Persian. We also study the algorithm's errors and propose a solution to fix them and improve the quality of the conversion process. The experimental results show that we can improve the quality of conversion by about 25.85 percent in Persian and about 4.39 percent in English. With the help of the conversion algorithm and the dependency treebank, we produce a phrase structure treebank and train a parser on the resulting treebank. Our parser's output is about 21 percent better than that of the same parser introduced as the baseline.}, Keywords = {Natural Language Processing, Persian Language, Dependency Structure Treebank, Phrase Structure Treebank, Phrase Structure Parser}, volume = {13}, Number = {2}, pages = {121-137}, publisher = {Research Center on Developing Advanced Technologies}, url = {http://jsdp.rcisp.ac.ir/article-1-336-en.html}, eprint = {http://jsdp.rcisp.ac.ir/article-1-336-en.pdf}, journal = {Signal and Data Processing}, issn = {2538-4201}, eissn = {2538-421X}, year = {2016} } @article{ author = {ShafeipourYourdeshahi, Sajjad and Seyedarabi, Hadi and Aghagolzadeh, Ali}, title = {Video based Face Recognition Using Orthogonal Locality Preserving Projection}, abstract = {In this paper, we attempt to improve the recognition rate and address problems such as pose and lighting variations and partial occlusion in video sequences using Orthogonal Locality Preserving Projection (OLPP). In this research, the face is first detected in the video frames to remove the background. Then each set of images is distributed on a nonlinear manifold and clustered using appropriate methods, and the center of each cluster is taken as the cluster representative. It is also shown that by using OLPP the key frames are projected into a new space, where the frames within each manifold become closer together and the frames of different manifolds are better separated. Recognition is done by projecting the test video sequence into the new space and calculating the distance between manifolds.
Comparing the experimental results of the proposed method with those of other methods demonstrates the effectiveness of the proposed approach.}, Keywords = {face recognition, Orthogonal Locality Preserving Projection, key frame, subspace, manifold}, volume = {13}, Number = {2}, pages = {139-149}, publisher = {Research Center on Developing Advanced Technologies}, url = {http://jsdp.rcisp.ac.ir/article-1-340-en.html}, eprint = {http://jsdp.rcisp.ac.ir/article-1-340-en.pdf}, journal = {Signal and Data Processing}, issn = {2538-4201}, eissn = {2538-421X}, year = {2016} }