@article{jsdp.13.4.3,
  author    = {Behzad Fallahpour, Mojtaba and Dehghani, Hamid and Jabbar Rashidi, Ali and Sheikhi, Abbas},
  title     = {Modelling and Software Implementation of SAR Imaging System Performance in Spotlight Mode},
  abstract  = {SAR imaging systems complement passive remote sensing, but the image-formation process in these systems is complex: the final image is formed after three basic steps, namely raw data acquisition, forming the signal space, and forming the image space. In addition, various factors inside and outside the system affect the information recorded by SAR, such as the radar, the platform, the processing algorithm, the imaging region, and the channel; each of these has many sub-parameters, which adds to the complexity of SAR behavior. Because of this complexity, a model that describes how the SAR imaging system behaves is highly important. In this paper, the performance of SAR image formation in spotlight mode is first modeled analytically, and the model is then implemented in software. The implementation covers the three basic steps of image formation: raw data acquisition is carried out in CST, while signal and image formation are carried out in MATLAB. The implementation makes it possible to simulate the effect of the relevant parameters on SAR images and to interpret them better. It can also be used to study the validity of proposed solutions in electronic warfare or passive defense against SAR imaging systems.},
  keywords  = {SAR, Signal Space, Image Space, Scattering Field, Functional Model, Software Implementation},
  volume    = {13},
  number    = {4},
  pages     = {3-18},
  publisher = {Research Center on Developing Advanced Technologies},
  doi       = {10.18869/acadpub.jsdp.13.4.3},
  url       = {http://jsdp.rcisp.ac.ir/article-1-377-en.html},
  eprint    = {http://jsdp.rcisp.ac.ir/article-1-377-en.pdf},
  journal   = {Signal and Data Processing},
  issn      = {2538-4201},
  eissn     = {2538-421X},
  year      = {2017}
}
@article{jsdp.13.4.19,
  author    = {Zeinali, Mansoor and Ghasemian, Hass},
  title     = {A novel method for increasing the spatial resolution of remote sensing images using lookup table},
  abstract  = {Different methods have been proposed to increase image spatial resolution by decomposing mixed pixels. These methods can be divided into two groups: some attempt to obtain the percentages of the sub-pixel end members, while others try to obtain their locations. These methods and their problems are examined in this study, with more emphasis on the common ones. Finally, a new method for increasing spatial resolution is proposed that resolves some deficiencies of the existing methods. In particular, this method obtains both the percentages and the locations of the end members of a mixed pixel at once, without using any additional information. It applies a proper lookup table, which is derived from an input image. By defining a similarity metric, we obtain a similar pixel for every input pixel; these similar pixels have equal sub-pixel structures, so an input pixel is decomposed into a proper set of sub-pixels. In high-quality images, these sub-pixels usually belong to pure classes. The proposed method is examined on four sets of artificial and real data. First we degrade these data sets by averaging filtering, and then we restore the degraded data using this method and two other methods. One of the other methods is a hard classification; the second combines fuzzy c-means with a direct method to obtain the percentages and locations of the sub-pixels, respectively. We compute the percentage of correct classification and the kappa criterion for these methods. Simulation results on artificial and real data show good sub-pixel decomposition performance of the proposed method relative to the other comparable methods; in particular, it shows at least 7% improvement on artificial data and 2% on real data relative to the other methods.},
  keywords  = {spatial resolution, change of image scale, lookup table, sub-pixel decomposition},
  volume    = {13},
  number    = {4},
  pages     = {19-28},
  publisher = {Research Center on Developing Advanced Technologies},
  doi       = {10.18869/acadpub.jsdp.13.4.19},
  url       = {http://jsdp.rcisp.ac.ir/article-1-188-en.html},
  eprint    = {http://jsdp.rcisp.ac.ir/article-1-188-en.pdf},
  journal   = {Signal and Data Processing},
  issn      = {2538-4201},
  eissn     = {2538-421X},
  year      = {2017}
}
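The abstract above describes decomposing each mixed pixel by looking up a similar pixel in a table built from the image itself. The following Python sketch is only a rough illustration of that idea, not the authors' algorithm: it assumes a grayscale image degraded by 2x2 block averaging, uses absolute difference as the similarity metric, and all names are illustrative.

import numpy as np

def build_lookup_table(high_res, block=2):
    """Pair each coarse (averaged) pixel value with its underlying sub-pixel block."""
    h, w = high_res.shape
    keys, blocks = [], []
    for i in range(0, h - block + 1, block):
        for j in range(0, w - block + 1, block):
            patch = high_res[i:i + block, j:j + block]
            keys.append(patch.mean())     # the degraded (mixed) value
            blocks.append(patch.copy())   # its sub-pixel structure
    return np.array(keys), np.array(blocks)

def decompose(low_res, keys, blocks, block=2):
    """Replace every coarse pixel with the sub-pixel block of its most similar key."""
    h, w = low_res.shape
    out = np.zeros((h * block, w * block), dtype=blocks.dtype)
    for i in range(h):
        for j in range(w):
            k = np.argmin(np.abs(keys - low_res[i, j]))  # similarity = |difference|
            out[i*block:(i+1)*block, j*block:(j+1)*block] = blocks[k]
    return out

The key design point mirrored here is that percentages and locations come out together: the matched block carries both which classes are present and where they sit inside the pixel.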
@article{jsdp.13.4.29,
  author    = {Esmaeili, Mohammad Reza and Zahiri, Seyed Hami},
  title     = {Epileptic seizure detection using Inclined Planes system Optimization algorithm (IPO)},
  abstract  = {Epilepsy is the second most common neurological disorder after stroke; about 1 percent of the world's population is affected by it. Epilepsy can strike people of different ages, altering the patient's behavior or awareness and affecting their social life. In 75% of cases, if epilepsy is diagnosed early and properly, it can be treated. Among the existing analysis methods for detecting epileptic brain activity, EEG is the most applicable because of its special features, including its low cost and harmlessness. Despite these advantages, visual scoring of EEG records by a human scorer is a very time-consuming and costly task, considering the large number of epileptic patients admitted to hospitals and the amount of data that needs to be scored. Thus, researchers have devoted tremendous effort to automatic detection of epileptic seizures in EEG. This paper offers a novel method based on a heuristic, intelligent algorithm, inclined planes system optimization (IPO), to distinguish epileptic samples from those of healthy subjects. Like other heuristic algorithms, IPO is inspired by nature and its laws: its main idea is the frictionless motion of spherical objects on a slope and their tendency to reach the lowest point. In IPO, small balls, like the particles in PSO, are placed randomly in the search space and search it for the optimal point, which is the lowest point (relative to a reference point) on the surface. In the current work, the data described by Andrzejak et al. was used, which contains five sets (Z, O, N, F, and S). Three classification problems are created from this dataset in order to compare the performance of our method with other approaches. In the first, two sets are examined: normal (set Z) and seizure (set S). In the second, four sets are used and classified into two classes: non-seizure (sets Z, N, F) and seizure (set S). In the third, all the EEGs of the dataset are used and classified into two classes: sets Z, O, N, and F form the non-seizure class and set S the seizure class. The EEG signal under study is first decomposed into five sub-bands through the DWT (D1-D4 and A4), each representing a different frequency band. Afterwards, four statistical parameters (maximum, minimum, average, and standard deviation) are calculated for each sub-band. Then, using the IPO optimization algorithm, the best weights are calculated and applied to the OVA classifier in order to find the best hyperplane separating the two classes; the fitness function of the IPO algorithm is the number of incorrectly classified signals. To classify the EEG signals in the three problems, 10-fold cross-validation is used: the data is divided into 10 subsets, one subset is used for testing and the other nine for training, and the procedure is repeated 10 times until all the data has been used for testing. The proposed algorithm was run 10 times for each of the two wavelet functions db1 and db2. Using the proposed method, the accuracies obtained for the three problems are 100%, 98.1%, and 97.34%, respectively. The proposed method also makes very fast diagnosis of epilepsy possible: the results show that the algorithm can classify epileptic and non-epileptic signals in less than 5 milliseconds, which makes it usable in real-time systems.},
  keywords  = {Electroencephalogram (EEG), Epileptic seizure detection, Discrete wavelet transform (DWT), Heuristic algorithm, Inclined planes system optimization algorithm (IPO)},
  volume    = {13},
  number    = {4},
  pages     = {29-42},
  publisher = {Research Center on Developing Advanced Technologies},
  doi       = {10.18869/acadpub.jsdp.13.4.29},
  url       = {http://jsdp.rcisp.ac.ir/article-1-238-en.html},
  eprint    = {http://jsdp.rcisp.ac.ir/article-1-238-en.pdf},
  journal   = {Signal and Data Processing},
  issn      = {2538-4201},
  eissn     = {2538-421X},
  year      = {2017}
}
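The feature-extraction stage described in the abstract (DWT into A4 and D4-D1, then four statistics per sub-band) can be sketched in a few lines. This is a minimal sketch assuming PyWavelets for the DWT; the IPO-weighted OVA classification itself is not shown.

import numpy as np
import pywt  # PyWavelets

def dwt_features(eeg, wavelet="db1", level=4):
    """Decompose an EEG signal into A4, D4..D1 and return 4 statistics per sub-band."""
    coeffs = pywt.wavedec(eeg, wavelet, level=level)  # [A4, D4, D3, D2, D1]
    feats = []
    for band in coeffs:
        feats += [band.max(), band.min(), band.mean(), band.std()]
    return np.array(feats)  # 5 sub-bands x 4 statistics = 20 features per signal

# usage: one 20-dimensional feature vector per EEG segment
x = dwt_features(np.random.randn(4096), wavelet="db2")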
@article{jsdp.13.4.43,
  author    = {Vafaei Jahan, Maji},
  title     = {Feature Extraction of Computer Files Structure by Statistical Analysis},
  abstract  = {Files are the most important sources of information and come in various formats such as text, audio, video, images, and web pages. In-depth analysis of files to recognize and investigate their unique characteristics is one of the most significant issues in personal security, information security, file-type identification, code structure analysis, and related fields. In this paper, a statistical methodology based on the n-gram model is applied to the contents of binary files in order to investigate the full range of a file's characteristics. Moreover, to reduce the computation volume and the memory required by the n-gram model, word clustering is used. The analysis is then conducted on file contents in two modes, block and full: in the full mode, characteristics such as chi-square, autocorrelation, weighted term frequency-inverse document frequency (TF-IDF), and fractal dimension are studied, while in the block mode, other properties such as the entropy rate and distance measures are examined. The results indicate that the characteristics extracted by the first method clearly reflect the unique properties of jpg, mp3, swf, and html files, while those extracted by the second method clearly reflect the properties of doc, html, and pdf files.},
  keywords  = {Files, n-gram model, word clustering, Canberra distance, entropy rate, Fractal dimension},
  volume    = {13},
  number    = {4},
  pages     = {43-62},
  publisher = {Research Center on Developing Advanced Technologies},
  doi       = {10.18869/acadpub.jsdp.13.4.43},
  url       = {http://jsdp.rcisp.ac.ir/article-1-141-en.html},
  eprint    = {http://jsdp.rcisp.ac.ir/article-1-141-en.pdf},
  journal   = {Signal and Data Processing},
  issn      = {2538-4201},
  eissn     = {2538-421X},
  year      = {2017}
}
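As a generic illustration of the n-gram statistics mentioned in the abstract (not the paper's implementation), the sketch below computes a byte n-gram frequency distribution over a file's raw contents and its Shannon entropy; the file name is hypothetical.

import math
from collections import Counter

def byte_ngrams(data: bytes, n: int = 1):
    """Frequency distribution of byte n-grams over raw file contents."""
    grams = Counter(data[i:i + n] for i in range(len(data) - n + 1))
    total = sum(grams.values())
    return {g: c / total for g, c in grams.items()}

def entropy(dist):
    """Shannon entropy (bits per n-gram) of a frequency distribution."""
    return -sum(p * math.log2(p) for p in dist.values() if p > 0)

# usage: compressed formats (jpg, mp3) tend toward high entropy, text-like files lower
with open("sample.bin", "rb") as f:  # hypothetical file name
    print(entropy(byte_ngrams(f.read(), n=1)))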
@article{jsdp.13.4.63,
  author    = {},
  title     = {Estimation of protein-coding regions in numerical DNA sequences using Variable Length Window method based on 3-D Z-curve},
  abstract  = {In recent years, estimating protein-coding regions in numerical deoxyribonucleic acid (DNA) sequences using signal processing tools has been a challenging issue in bioinformatics, owing to the 3-base periodicity of such regions. Several digital signal processing (DSP) tools have been applied to this task; they concentrate on assigning numerical values to the symbolic DNA sequence and then applying spectral analysis tools, such as the discrete Fourier transform (DFT), to locate the periodic components. Despite the many advantages of the Fourier transform in detecting exonic regions, this approach has some restrictions, such as high computational complexity and an inability to locate short coding regions. In this paper, we improve the performance of the conventional DFT in estimating protein-coding regions by utilizing a Gaussian window of variable length. First, the DNA strands are converted into numerical signals via the 3-D Z-curve method. The Z-curve is a robust, independent, less redundant approach with a clear biological interpretation, and it can be regarded as a useful visualization technique for DNA of any length. In the second stage, the non-coding regions and the background noise components are suppressed using the Gaussian variable-length window. We also use a narrow-band band-pass filter centered on the period-3 frequency in order to extract the period-3 components. The performance of the proposed algorithm is tested on F56F11.4 from the C. elegans chromosome III as well as on two eukaryotic datasets, HMR195 and BG570, and is compared with other state-of-the-art methods using nucleotide-level evaluation metrics such as sensitivity, specificity, approximation correlation, and precision. The results reveal that the area under the receiver operating characteristic (ROC) curve is improved by 4% to 40% on the HMR195 and BG570 datasets compared to the other methods. Furthermore, the proposed algorithm reduces the number of nucleotides incorrectly estimated as coding regions.},
  keywords  = {Protein Coding Regions, Period-3, Digital Signal Processing, DNA, Variable Length Window, Band-Limited Filter},
  volume    = {13},
  number    = {4},
  pages     = {63-78},
  publisher = {Research Center on Developing Advanced Technologies},
  doi       = {10.18869/acadpub.jsdp.13.4.63},
  url       = {http://jsdp.rcisp.ac.ir/article-1-101-en.html},
  eprint    = {http://jsdp.rcisp.ac.ir/article-1-101-en.pdf},
  journal   = {Signal and Data Processing},
  issn      = {2538-4201},
  eissn     = {2538-421X},
  year      = {2017}
}
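The 3-D Z-curve mapping used above is a standard construction; the sketch below follows the usual cumulative definition (purine/pyrimidine, amino/keto, weak/strong hydrogen-bond disparities), not necessarily the paper's exact preprocessing.

import numpy as np

def z_curve(seq: str) -> np.ndarray:
    """Map a DNA string to its 3-D Z-curve: cumulative base-count differences."""
    s = seq.upper()
    counts = {b: np.cumsum([ch == b for ch in s]) for b in "ACGT"}
    A, C, G, T = counts["A"], counts["C"], counts["G"], counts["T"]
    x = (A + G) - (C + T)  # purine vs. pyrimidine
    y = (A + C) - (G + T)  # amino vs. keto
    z = (A + T) - (C + G)  # weak vs. strong hydrogen bonds
    return np.stack([x, y, z])

# usage: each of the three rows is one numerical signal to which the
# windowed DFT / period-3 band-pass analysis can be applied
xyz = z_curve("ATGGCGTACGCTAGC")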
@article{jsdp.13.4.79,
  author    = {Zohour Parvaz, Farnaz and Fatemizadeh, Emad and Behnam, Hami},
  title     = {Speed improvement in graph-cuts-based registration for non-rigid image registration of brain magnetic resonance images},
  abstract  = {Image processing methods that can visualize objects inside the human body are of special interest. In clinical diagnosis using medical images, integrating useful data from separate images is often desired, and the images have to be geometrically aligned for better observation. The procedure of mapping points of the reference image to the corresponding points of the floating image is called image registration; it is a spatial transform. The images may differ because they were taken at different times or with different devices. By the nature of the transformation, image registration can be classified into rigid and non-rigid registration. A rigid transformation has relatively few degrees of freedom, and rigid registration methods are becoming mature; in contrast, non-rigid registration is still a challenging problem because of its many degrees of freedom. One family of non-rigid registration methods turns registration into an optimization problem and takes the optimal value as the registration result. An example is graph-cuts-based registration: a specialized graph is constructed for the energy function to be minimized, in such a way that the minimum cut on this graph also minimizes the energy. Since our focus in this research is medical image registration, and time is a critical factor in medical applications, improving the run time of this method should help its clinical and medical applications. To achieve this goal, we modify the energy function and propose a method that significantly reduces the run time of the registration process. The implementation results of our proposed method on images with artificial deformations, similar to the most pessimistic deformation modes possible in real image data, show that the proposed algorithm is about three times faster than the existing algorithm, while the average SAD criterion increases from 0.7 to 1.},
  keywords  = {Non-rigid image registration, Graph-cuts, Magnetic resonance images},
  volume    = {13},
  number    = {4},
  pages     = {79-92},
  publisher = {Research Center on Developing Advanced Technologies},
  doi       = {10.18869/acadpub.jsdp.13.4.79},
  url       = {http://jsdp.rcisp.ac.ir/article-1-252-en.html},
  eprint    = {http://jsdp.rcisp.ac.ir/article-1-252-en.pdf},
  journal   = {Signal and Data Processing},
  issn      = {2538-4201},
  eissn     = {2538-421X},
  year      = {2017}
}
@article{jsdp.13.4.93,
  author    = {Dianat, Rouhollah and Ahmadi, Morteza Ali and Akhlaghi, Yahya and Babaali, Bagher},
  title     = {Introducing a new information retrieval method applicable for speech recognized texts},
  abstract  = {In this article, a pre-processing method applicable to the retrieval of speech-recognized texts is introduced. The inputs are a text corpus generated by a speech recognition system and a query; the task is to search the documents and find those relevant to the query. A basic problem with a typical speech-recognized text is the recognition error rate, which causes queries to be erroneously matched to irrelevant documents. The idea of the proposed method is to detect error-prone terms and to find similar words for each such term. A parameter is defined that estimates the probability of errors occurring in the error-prone words. To recognize the words similar to a specific term, some candidates are first chosen as the initial similar-word set based on a criterion called the average detection rate (ADR) and the Levenshtein distance. A conversion probability is then defined based on the conversion rate (CR) and the noisy channel model (NCM), and the words whose probability exceeds a threshold are selected as the final similar words. In the retrieval process, these words are considered in the search step in addition to the base word. Implementation results show a significant improvement, up to 30% in F-measure, when information retrieval is performed with this pre-processing.},
  keywords  = {Information retrieval, Speech recognition, Document, Query, Levenshtein distance},
  volume    = {13},
  number    = {4},
  pages     = {93-108},
  publisher = {Research Center on Developing Advanced Technologies},
  doi       = {10.18869/acadpub.jsdp.13.4.93},
  url       = {http://jsdp.rcisp.ac.ir/article-1-360-en.html},
  eprint    = {http://jsdp.rcisp.ac.ir/article-1-360-en.pdf},
  journal   = {Signal and Data Processing},
  issn      = {2538-4201},
  eissn     = {2538-421X},
  year      = {2017}
}
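The Levenshtein distance used above to pick candidate similar words is the classic edit-distance dynamic program; a self-contained sketch (the ADR/CR/NCM scoring from the paper is not reproduced here):

def levenshtein(a: str, b: str) -> int:
    """Minimum number of insertions, deletions and substitutions turning a into b."""
    prev = list(range(len(b) + 1))
    for i, ca in enumerate(a, 1):
        cur = [i]
        for j, cb in enumerate(b, 1):
            cur.append(min(prev[j] + 1,                 # deletion
                           cur[j - 1] + 1,              # insertion
                           prev[j - 1] + (ca != cb)))   # substitution
        prev = cur
    return prev[-1]

# usage: small distance = plausible recognition confusion, so a good candidate
print(levenshtein("retrieval", "retreival"))  # 2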
@article{jsdp.13.4.109,
  author    = {Gharaee, Hossein and Mohammadi, Fari},
  title     = {Modified AODV Routing Protocol in Order to Defend Wormhole Attacks},
  abstract  = {Mobile ad hoc networks (MANETs) are vulnerable to both active and passive attacks. The wormhole attack is one of the most severe security attacks in wireless ad hoc networks; it can be mounted on a wide range of wireless network protocols without compromising any cryptographic quantity or network node. In a wormhole attack, one malicious node tunnels packets from its location to another malicious node. Such attacks result in a false route with fewer hops; if the source chooses this fake route, the malicious nodes have the option to sniff, modify, selectively forward, or drop packets. Existing solutions against wormhole attacks, such as SECTOR, packet leashes, DelPHI, and directional antennas, require special hardware or strictly synchronized clocks, cause message overhead, or generate false-positive alarms. A novel approach, MAODV (Modified AODV), is proposed to defend against wormhole attacks launched on AODV. The proposed approach is based on a weight per hop: each node in the network has its own weight, assigned by the administration according to its trusted power capability, and the sum of the weights never exceeds 100. Whenever a source node wants to send traffic to a destination, it puts the minimum weight needed to constitute the route in the RREQ packet, and the route whose weight is closest to the weight announced by the source is selected at the destination. Since no special hardware and no encryption techniques are used, the approach is likely to incur less overhead and delay than other techniques. The proposed wormhole defense mechanism is discussed in detail; it requires neither synchronized clocks nor special hardware. Some parameters are added to the AODV routing protocol to make it more secure against wormhole attacks, and the resulting protocol is named MAODV. Initially, there is a master node in the network which holds weight 100 (the weight of the whole network). Whenever a node wants to enter the network, it sends a join message to its nearest neighbor; after receiving the message, the master node shares its weight with the requesting node and sends it the assigned weight. This weight sharing is repeated for every join request, and the total weight of the network never exceeds 100. In the proposed method, each path created between a source and a destination has a particular weight, equal to the sum of the weights of its intermediate nodes. In MAODV, whenever a source node sends an RREQ packet, it adds the minimum weight needed to constitute the route. After receiving an RREQ packet, each intermediate node adds its own weight in addition to incrementing the hop count. Every intermediate node does the same until the destination receives the RREQ packets; among them, the destination selects the one whose weight equals the minimum weight requested by the source, or is slightly more. For instance, consider Fig. 1, which has 14 nodes; assume the node weights are all equal to 7, so the weight of the whole network is close to 100. Example 1: in Fig. 1, node A sends an RREQ toward node B. First, node A checks its cache table to see whether a route between A and B already exists. If it does, A starts to send data; if not, it sets up an RREQ as <A,B,1,7,25,[]>, which means A: source, B: destination, 1: hop count, 7: constituted path weight, 25: requested weight, []: intermediate nodes. Each node that receives an RREQ checks whether it is the destination; if it is not, it (1) increments the hop count, (2) adds its weight to the constituted path weight, and (3) adds its address to the intermediate nodes, and then broadcasts the RREQ to its neighbors. In this example, node A sends the RREQ to X and C, its legitimate neighbors. When X receives the packet, it modifies it to <A,B,2,14,25,[X]> and forwards it to its neighbors; likewise, node C modifies the packet to <A,B,2,14,25,[C]> and forwards it to its neighbor D. This action is repeated until B receives two RREQs, <A,B,4,28,25,[C,D,E]> and <A,B,7,49,25,[X,U,V,W,Z,Y]>. Among the received RREQs, B selects the one whose weight equals the minimum weight requested by A, or is slightly more, so the first route is chosen. Node B then sets up an RREP packet as <A,B,1,4,25,7,[E,D,C]>, which means A: source, B: destination, 1: back-path weight, 4: hop count, 25: requested weight, 7: constituted path weight, [E,D,C]: intermediate nodes. The effectiveness of the proposed mechanism is evaluated using the ns2 network simulator. The simulation results demonstrate that the PDR of MAODV is 5% to 8% higher than that of the AODV routing protocol in the presence of two malicious nodes. The average point-to-point delay of MAODV is higher than that of AODV, but lower than that of SAODV, since no encryption is used.},
  keywords  = {MANET, Wormhole attacks, AODV, NS2},
  volume    = {13},
  number    = {4},
  pages     = {109-120},
  publisher = {Research Center on Developing Advanced Technologies},
  doi       = {10.18869/acadpub.jsdp.13.4.109},
  url       = {http://jsdp.rcisp.ac.ir/article-1-212-en.html},
  eprint    = {http://jsdp.rcisp.ac.ir/article-1-212-en.pdf},
  journal   = {Signal and Data Processing},
  issn      = {2538-4201},
  eissn     = {2538-421X},
  year      = {2017}
}
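The RREQ handling in Example 1 can be mirrored in a few lines. This is an illustrative model of the described fields and weight accumulation only, not the authors' ns2 implementation; all names are assumptions.

from typing import NamedTuple

class RREQ(NamedTuple):
    """Route-request fields as described in the MAODV example."""
    source: str
    destination: str
    hop_count: int
    path_weight: int       # weight of the source plus visited intermediate nodes
    requested_weight: int  # minimum weight the source asks for
    intermediates: tuple

def forward(pkt: RREQ, node: str, node_weight: int) -> RREQ:
    """What an intermediate node does before re-broadcasting the RREQ."""
    return pkt._replace(hop_count=pkt.hop_count + 1,
                        path_weight=pkt.path_weight + node_weight,
                        intermediates=pkt.intermediates + (node,))

def select_route(rreqs):
    """Destination picks the RREQ meeting the requested weight, closest to it."""
    ok = [r for r in rreqs if r.path_weight >= r.requested_weight]
    return min(ok, key=lambda r: r.path_weight - r.requested_weight) if ok else None

# usage, mirroring Example 1: A's initial packet, then node X forwards it
pkt = RREQ("A", "B", 1, 7, 25, ())
print(forward(pkt, "X", 7))  # hop_count=2, path_weight=14, intermediates=('X',)

With the two RREQs of the example (path weights 28 and 49 against a requested weight of 25), select_route returns the 28-weight route, matching the route choice described in the abstract.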
@article{jsdp.13.4.121,
  author    = {Ghayoomi, Masoo},
  title     = {A Comparative Study on the Impact of Part-of-Speech Tagging on Parsing for the Persian Language Processing},
  abstract  = {In this paper, the role of part-of-speech (POS) tagging for parsing in automatic processing of the Persian language is studied. To this end, the impact of the quality of POS tagging, as well as the impact of the quantity of information available in the POS tags, on parsing is studied. To reach these goals, three parsing scenarios are proposed and compared. In the first scenario, the parser assigns the POS tags itself and then parses the input sentence. In the second scenario, an external POS tagger is used to assign the tags, and the sentence is then parsed. In the third scenario, the parser uses the gold-standard POS tags to parse the input sentence. In this study, various evaluation metrics are used to show the impacts from different points of view. The experimental results show that the quality of the POS tagger and the quantity of information available in the POS tags have a direct effect on parsing performance. High-quality POS tags reduce parsing errors and increase parsing performance. Moreover, a lack of morphological-syntactic information in the POS tags has a strongly negative impact on parsing performance, more pronounced than the impact of the POS tagger's performance.},
  keywords  = {processing of the Persian language, part-of-speech tagging, parsing},
  volume    = {13},
  number    = {4},
  pages     = {121-132},
  publisher = {Research Center on Developing Advanced Technologies},
  doi       = {10.18869/acadpub.jsdp.13.4.121},
  url       = {http://jsdp.rcisp.ac.ir/article-1-300-en.html},
  eprint    = {http://jsdp.rcisp.ac.ir/article-1-300-en.pdf},
  journal   = {Signal and Data Processing},
  issn      = {2538-4201},
  eissn     = {2538-421X},
  year      = {2017}
}
@article{jsdp.13.4.133,
  author    = {Jafarian-Moghaddam, Ahmad Reza and Barzinpour, Farnaz and Fathian, Mohamm},
  title     = {New Clustering Technique using Artificial Immune System and Hierarchical technique},
  abstract  = {The artificial immune system (AIS) is one of the meta-heuristic algorithms used to solve complex problems. With large amounts of data, making rapid decisions with stable results is a most challenging task, owing to the rapid variation of the real world; clustering is a possible solution to these problems. The goal of cluster analysis is to group similar objects, and the AIS algorithm can be used for data clustering. Although AIS can represent the configuration of the search space well, determining the clusters of a data set directly from the AIS output is very difficult and costly. Accordingly, in this paper a two-stage algorithm is proposed based on the AIS algorithm and the hierarchical clustering technique. High execution speed and no need to specify the number of clusters are the benefits of hierarchical clustering, but the technique is sensitive to outlier data. So, in the first stage of the proposed algorithm, the search space and the configuration space are identified using the proposed AIS algorithm, and the outlier data are thereby detected; in the second stage, the clusters and their number are determined using the hierarchical clustering technique. Consequently, the first stage eliminates the disadvantages of hierarchical clustering, and the problems of AIS are resolved in the second stage. The proposed algorithm is evaluated through two metrics: (i) execution time and (ii) the sum of squared errors (SSE), the total distance between the center of a cluster and the cluster members, used to measure the goodness of a clustering structure. Finally, the proposed algorithm is applied to a real data sample of earthquakes in Iran and compared with a similar algorithm, the improved ant-system-based clustering algorithm (IASC). IASC is a meta-heuristic clustering algorithm based on the ant colony system (ACS); it is fast and suitable for dynamic environments. On this data, the proposed algorithm achieved an execution time of 12 s and an SSE of 5.3, versus 18 s and an SSE of 9.4 for IASC. The results show that the proposed algorithm covers the drawbacks of the AIS and hierarchical clustering techniques while offering high precision and acceptable run speed.},
  keywords  = {Clustering Analysis, Artificial immune system (AIS), Hierarchical Clustering},
  volume    = {13},
  number    = {4},
  pages     = {133-145},
  publisher = {Research Center on Developing Advanced Technologies},
  doi       = {10.18869/acadpub.jsdp.13.4.133},
  url       = {http://jsdp.rcisp.ac.ir/article-1-88-en.html},
  eprint    = {http://jsdp.rcisp.ac.ir/article-1-88-en.pdf},
  journal   = {Signal and Data Processing},
  issn      = {2538-4201},
  eissn     = {2538-421X},
  year      = {2017}
}
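The SSE metric used in the evaluation above is standard and can be sketched as follows, assuming points, integer cluster labels, and cluster centers as NumPy arrays; the AIS and hierarchical stages themselves are not reproduced here.

import numpy as np

def sse(points, labels, centers):
    """Sum of squared distances from each point to its cluster center."""
    return sum(np.sum((points[labels == k] - c) ** 2)
               for k, c in enumerate(centers))

# usage with hypothetical 2-D data and two clusters
pts = np.array([[0., 0.], [1., 0.], [10., 10.], [11., 10.]])
lab = np.array([0, 0, 1, 1])
ctr = np.array([pts[lab == k].mean(axis=0) for k in range(2)])
print(sse(pts, lab, ctr))  # 1.0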