@article{jsdp_17_3_3,
  author    = {KuchakiRafsanjani, Marjan and BorumandSaeid, Arsham and Mirzapour, Farzane},
  title     = {Hybrid multi-criteria group decision-making for supplier selection problem with interval-valued Intuitionistic fuzzy data},
  abstract  = {The main objectives of supply chain management are to reduce supply chain risk and production costs, increase income, improve customer service, and optimize achievement levels and business processes, thereby increasing capability, competency, customer satisfaction, and profitability. Furthermore, selecting an appropriate supplier capable of providing the buyer's requirements in terms of quality products at a suitable price, time, and quantity is one of the most essential activities in creating an efficient supply chain; wrong decisions in supplier selection therefore have negative consequences. Suitable supplier selection methods are usually multi-criteria (multi-attribute), so finding the optimal solution is demanding. Conventional methods in this field handle quantitative criteria, whereas supplier selection also involves a wide range of qualitative criteria. This article uses interval-valued intuitionistic fuzzy sets for selecting appropriate suppliers, since they reflect ambiguity and uncertainty far better than other representations. A trapezoidal fuzzy membership function is used for linguistic qualitative values. The goal programming satisfaction function (GPSF) is a technique that helps decision makers solve problems involving conflicting and competing criteria and objectives. Given the importance of the issue, this paper implements a hybrid group decision-making approach to Multiple Criteria Decision Making (MCDM) over interval-valued intuitionistic fuzzy sets to solve the supplier selection problem. In phase 1 of this model, decision makers express their opinions about each alternative qualitatively, based on different attributes; after the interval-valued intuitionistic fuzzy memberships are created, a new variable is defined with whose help interval-valued intuitionistic fuzzy values are calculated for each alternative. Owing to their expressiveness and comprehensiveness, interval-valued intuitionistic fuzzy sets are not only preferable to other fuzzy sets but are also well suited to capturing the real conditions of the supplier-selection environment. Thereafter, upper and lower bounds are calculated for each alternative based on the interval-valued intuitionistic fuzzy values. In phase 2, the Ordered Weighted Averaging (OWA) operator is used to reach a collective consensus. After the degree of consensus is computed, closeness coefficients are evaluated with the help of the TOPSIS method, one of the most practical multi-criteria decision-making methods alongside SAW, AHP, CP, and VIKOR. The closeness coefficient measures how close each individual opinion is to the collective agreement, and the main aim of this article is to optimize it: the alternative with the maximum closeness coefficient is closest to the ideal solution. The final goal of the proposed model is to rank the suppliers while satisfying the main decision-making factors, which is why the GPSF model is used. Once the goal and constraint functions are specified, the GPSF model is solved and the alternatives are ranked.},
  keywords  = {Interval-valued intuitionistic fuzzy set, Collective preference, Fuzzy TOPSIS, Multi-criteria, Supplier selection, Goal programming satisfaction function},
  volume    = {17},
  number    = {3},
  pages     = {3-16},
  publisher = {Research Center on Developing Advanced Technologies},
  doi       = {10.29252/jsdp.17.3.3},
  url       = {http://jsdp.rcisp.ac.ir/article-1-941-en.html},
  eprint    = {http://jsdp.rcisp.ac.ir/article-1-941-en.pdf},
  journal   = {Signal and Data Processing},
  issn      = {2538-4201},
  eissn     = {2538-421X},
  year      = {2020}
}
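To make the ranking step concrete: TOPSIS scores each alternative by a closeness coefficient between its distances to the positive and negative ideal solutions. A minimal sketch on crisp scores (an assumption; the paper works with interval-valued intuitionistic fuzzy values and a consensus degree, which this illustration omits):

import numpy as np

def closeness_coefficients(scores: np.ndarray) -> np.ndarray:
    """scores: (n_alternatives, n_criteria) matrix, higher is better."""
    ideal = scores.max(axis=0)         # positive ideal solution
    anti_ideal = scores.min(axis=0)    # negative ideal solution
    d_plus = np.linalg.norm(scores - ideal, axis=1)
    d_minus = np.linalg.norm(scores - anti_ideal, axis=1)
    return d_minus / (d_plus + d_minus)   # in [0, 1]; larger means closer to ideal

cc = closeness_coefficients(np.array([[0.7, 0.6], [0.4, 0.9], [0.2, 0.3]]))
print(int(cc.argmax()))  # index of the best-ranked supplier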
@article{jsdp_17_3_17,
  author    = {Mavaddati, Samir},
  title     = {A New Method for Speech Enhancement Based on Incoherent Model Learning in Wavelet Transform Domain},
  abstract  = {The quality of a speech signal is significantly degraded in the presence of environmental noise, leading to imperfect performance of hearing aid devices, automatic speech recognition systems, and mobile phones. This paper considers single-channel enhancement of speech corrupted by additive noise. A dictionary-based algorithm is proposed to train speech and noise models for each subband of the wavelet decomposition based on a coherence criterion. Using the presented learning method, the self-coherence between different atoms of each dictionary and the mutual coherence between the atoms of the speech and noise dictionaries are minimized, yielding a lower sparse reconstruction error. To reduce computation time, a composite dictionary is utilized that includes only the speech dictionary and the one noise dictionary selected to match the noise condition of the test environment. The speech enhancement algorithm is introduced in two scenarios, supervised and semi-supervised. In each scenario, a voice activity detection (VAD) scheme is employed, based on the energy of the sparse coefficient matrices obtained when the observed data are coded over the corresponding dictionaries; the enhancement schemes differ between the two scenarios. In the proposed supervised scenario, a domain adaptation technique is employed to transform a learned noise dictionary into a dictionary adapted to the noise conditions of the test environment. With this step, the observed data are sparsely coded with low approximation error according to the current state of the noisy environment, which plays a prominent role in obtaining better enhancement results, particularly when the noise signal has non-stationary characteristics. In the proposed semi-supervised scenario, adaptive thresholding of the wavelet coefficients is carried out based on the variance of the estimated noise for each frame in the different subbands. These implementations are evaluated under both training and test conditions, in speaker-dependent and speaker-independent scenarios. Different measures are applied to evaluate the performance of the presented enhancement procedures, and a statistical test is used for a more precise comparison of the considered methods under various noisy conditions. The experimental results show that the presented supervised enhancement scheme leads to much better results than the baseline enhancement methods, learning-based approaches, and earlier wavelet-based algorithms. These results were obtained for an extensive range of noise types, including structured, unstructured, and periodic noise signals, at different SNR values.},
  keywords  = {Speech enhancement, Dictionary learning, Sparse representation, Domain adaptation, Voice activity detector, Wavelet transform},
  volume    = {17},
  number    = {3},
  pages     = {17-36},
  publisher = {Research Center on Developing Advanced Technologies},
  doi       = {10.29252/jsdp.17.3.17},
  url       = {http://jsdp.rcisp.ac.ir/article-1-835-en.html},
  eprint    = {http://jsdp.rcisp.ac.ir/article-1-835-en.pdf},
  journal   = {Signal and Data Processing},
  issn      = {2538-4201},
  eissn     = {2538-421X},
  year      = {2020}
}
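The coherence criterion above can be made concrete with the usual mutual-coherence measure between two dictionaries: the largest absolute inner product between their unit-norm atoms. A minimal sketch with random matrices standing in for learned speech and noise dictionaries:

import numpy as np

def mutual_coherence(Ds: np.ndarray, Dn: np.ndarray) -> float:
    """Maximum absolute inner product between unit-norm columns (atoms)."""
    Ds = Ds / np.linalg.norm(Ds, axis=0, keepdims=True)
    Dn = Dn / np.linalg.norm(Dn, axis=0, keepdims=True)
    return float(np.abs(Ds.T.dot(Dn)).max())

rng = np.random.default_rng(0)
speech_dict = rng.standard_normal((64, 100))  # 64-dim atoms, 100 per dictionary
noise_dict = rng.standard_normal((64, 80))
print(mutual_coherence(speech_dict, noise_dict))  # smaller is more incoherent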
@article{jsdp_17_3_37,
  author    = {Ahmadi, Tahere and Karshenas, Hossein and Babaali, Bagher and Alinejad, Batool},
  title     = {Allophone-based acoustic modeling for Persian phoneme recognition},
  abstract  = {Phoneme recognition is one of the fundamental phases of automatic speech recognition. Coarticulation, which refers to the blending of adjacent sounds, is one of the important obstacles in phoneme recognition: each phone is influenced and changed by the characteristics of its neighboring phones, and coarticulation is responsible for most of these changes. Modeling the effects of speech context and using context-dependent models in phoneme recognition is a method used to compensate for the negative effects of coarticulation. Under this method, if two instances of a similar phoneme occur in different contexts, each of them constitutes a separate model. In this research, a linguistic method called allophonic modeling is used to model context effects in Persian phoneme recognition. For this purpose, in the first phase, the rules governing the occurrence of the various allophones of each phoneme are extracted from Persian linguistic resources, so each phoneme is treated as a class consisting of its various context-dependent forms, called allophones. A necessary prerequisite for modeling and identifying allophones is an allophonic corpus. Since no such corpus existed for Persian, the SMALL FARSDAT corpus was used. This corpus is manually segmented and labeled at the sentence, word, and phoneme levels, so the phonological and lexical context required for the realization of allophones is available in it. For example, syllabification was performed on the corpus, and then, for each phoneme, its position (initial, medial, or final) in the word and syllable was specified using different numeric tags. In the next step, allophonic labeling was performed by searching the corpus for each of the allophonic contexts. This allophonic corpus is used to model and recognize the allophones of the input speech. Finally, each allophone is assigned to its proper phonemic class, so phoneme recognition is performed via allophones. The experimental results show the high accuracy of the proposed method in phoneme recognition, indicating a significant improvement compared with other state-of-the-art methods.},
  keywords  = {automatic speech recognition, automatic phoneme recognition, context-dependent models, phoneme, allophone, coarticulation},
  volume    = {17},
  number    = {3},
  pages     = {37-54},
  publisher = {Research Center on Developing Advanced Technologies},
  doi       = {10.29252/jsdp.17.3.37},
  url       = {http://jsdp.rcisp.ac.ir/article-1-903-en.html},
  eprint    = {http://jsdp.rcisp.ac.ir/article-1-903-en.pdf},
  journal   = {Signal and Data Processing},
  issn      = {2538-4201},
  eissn     = {2538-421X},
  year      = {2020}
}
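The final step, collapsing recognized allophones back to phoneme classes, amounts to a many-to-one mapping. A minimal sketch with hypothetical allophone labels (the actual inventory and context tags come from the extracted rules and the SMALL FARSDAT corpus):

# Hypothetical allophone labels of the form phoneme_position (position in word/syllable).
ALLOPHONE_TO_PHONEME = {
    "a_initial": "a", "a_medial": "a", "a_final": "a",
    "k_initial": "k", "k_final": "k",
}

def to_phonemes(allophone_sequence):
    """Map a recognized allophone sequence onto its phoneme classes."""
    return [ALLOPHONE_TO_PHONEME[a] for a in allophone_sequence]

print(to_phonemes(["k_initial", "a_medial", "k_final"]))  # ['k', 'a', 'k']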
@article{jsdp_17_3_55,
  author    = {Bouyer, Asgarali and Norouzi, Somayeh},
  title     = {Sampling from social networks’s graph based on topological properties and bee colony algorithm},
  abstract  = {In recent years, the sampling problem in massive social network graphs has attracted much attention, since analyzing a small, good sample is much faster than analyzing the huge original network. Many algorithms have been proposed for sampling social network graphs. Their purpose is to create a sample approximately similar to the original graph in terms of properties such as degree distribution, clustering coefficient, internal density, and community structure. There are various sampling methods, including random walk-based methods, shortest path-based methods, and graph partitioning-based algorithms, each with its own pros and cons. Their main drawbacks are high time complexity in constructing the sample graph and insufficient attention to the quality of the obtained sample. In this paper, we propose a new sampling method built on a new equation based on the structural properties of social networks, combined with the bee colony algorithm. This sampling method follows an informed, non-random approach, so that the generated samples are similar to the original network in terms of topological properties, degree distribution, internal density, clustering coefficient, and community structure. Because initial population generation is random in meta-heuristic sampling methods such as genetic algorithms and other evolutionary algorithms, our method instead selects nodes consciously when producing the initial solutions. Based on finding hub and semi-hub nodes, as well as other important nodes such as core nodes, the method tries to preserve these important nodes in the initial solutions and in the obtained samples as far as possible. This yields a high-quality final sample close to the original network: the sample graph is well matched to the original network and preserves its main characteristics, such as its topology, number of communities, and largest component, as much as possible. The non-random, conscious selection of nodes and their involvement in the initial steps of sample extraction have two important advantages. The first is the stability of the new method in extracting high-quality samples on every run: despite the random behavior of the bee algorithm, the samples obtained in the final phase mostly have similar quality. The second is the satisfactory running time of the proposed algorithm in finding a new sample. Indeed, the first question that may arise concerns the time complexity and relatively slow convergence of the bee colony algorithm; in response, the conscious selection of important nodes in the initial solutions provides the bee colony algorithm with high-quality solutions in terms of the fitness function. Experimental results on real-world networks show that the proposed method best preserves the degree distribution, clustering coefficient, and community structure in comparison with other methods.},
  keywords  = {Sampling, Social networks, Clustering coefficient, Artificial Bee Colony},
  volume    = {17},
  number    = {3},
  pages     = {55-70},
  publisher = {Research Center on Developing Advanced Technologies},
  doi       = {10.29252/jsdp.17.3.55},
  url       = {http://jsdp.rcisp.ac.ir/article-1-1009-en.html},
  eprint    = {http://jsdp.rcisp.ac.ir/article-1-1009-en.pdf},
  journal   = {Signal and Data Processing},
  issn      = {2538-4201},
  eissn     = {2538-421X},
  year      = {2020}
}
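A minimal sketch of the conscious seeding idea, using only node degree to pick hub-like seeds for the initial solutions (an assumption; the paper also exploits semi-hub and core nodes, which this sketch omits):

import networkx as nx

def seed_nodes(graph: nx.Graph, sample_size: int, hub_fraction: float = 0.3):
    """Fix the highest-degree nodes as seeds of each initial solution,
    so important hubs survive into the extracted sample."""
    n_hubs = max(1, int(sample_size * hub_fraction))
    by_degree = sorted(graph.degree, key=lambda kv: kv[1], reverse=True)
    return [node for node, _ in by_degree[:n_hubs]]

g = nx.karate_club_graph()
print(seed_nodes(g, sample_size=10))  # hub nodes to embed in every initial solution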
@article{jsdp_17_3_71,
  author    = {Rezaeian, Mohammad Rez},
  title     = {Analytical determination of the chemical exchange saturation transfer (CEST) contrast in molecular magnetic resonance imaging},
  abstract  = {Molecular magnetic resonance imaging allows contrast agents to be traced, facilitating early, non-invasive diagnosis of disease with enhanced soft-tissue contrast and high spatial resolution. Recently, the exchange of protons between a contrast agent and water, known as the chemical exchange saturation transfer (CEST) effect, has been measured by applying a suitable pulse sequence on a magnetic resonance imaging (MRI) scanner. CEST MRI is increasingly used to probe mobile proteins and microenvironment properties, and shows great promise for tumor and stroke diagnosis. The CEST effect reduces the magnetic moment of water, causing a corresponding decrease in the gray-scale intensity of the image and thus providing negative contrast in the CEST image. The CEST effect is complex: it depends on the CEST agent concentration, the exchange rates, the characteristics of magnetization transfer (MT), and the relaxation properties of the tissue. CEST contrast differs from the inherent MT of macromolecule-bound protons, which arises from dipole-dipole interactions between water and macromolecular components. It was recently shown that CEST measurements can be strongly affected by MT and direct saturation effects, so corrections are needed to derive accurate estimates of CEST contrast. In particular, the existence of an analytical relation between the chemical exchange rate and physiological parameters such as core temperature, glucose level, and pH has generated further interest in quantifying the CEST contrast. The most important model was obtained by analyzing the water saturation spectrum, known as the magnetization transfer ratio spectrum, quantified by solving the Bloch equations. This paper provides a closed-form analytical expression for the CEST contrast under steady-state and transient conditions, based on the eigenspace solution of the Bloch-McConnell equations, covering both the MT and CEST effects as well as their interactions. The CEST contrast is modeled in two- and three-pool systems using measured (real experimental) data and fitted data resembling muscle tissue, with interfering factors taken into account. The resulting error, characterized by the average relative sum of squares between three experimental datasets and the CEST contrast fitted with the proposed formulation, was lower than 4 percent. For further validation, these formulations were compared with the empirical formulation of the CEST effect for a diamagnetic contrast agent introduced in the two-pool system. Using the proposed analytical expression for the CEST contrast, we optimized critical parameters such as the contrast agent concentration, the chemical exchange rate, and the characteristics (amplitude and width) of the rectangular radio-frequency pulse.},
  keywords  = {Chemical exchange saturation transfer, Bloch-McConnell equations, Magnetization transfer, Numerical solution, Z-spectra modeling},
  volume    = {17},
  number    = {3},
  pages     = {71-86},
  publisher = {Research Center on Developing Advanced Technologies},
  doi       = {10.29252/jsdp.17.3.71},
  url       = {http://jsdp.rcisp.ac.ir/article-1-994-en.html},
  eprint    = {http://jsdp.rcisp.ac.ir/article-1-994-en.pdf},
  journal   = {Signal and Data Processing},
  issn      = {2538-4201},
  eissn     = {2538-421X},
  year      = {2020}
}
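For orientation, CEST contrast is commonly quantified from the water saturation (Z-) spectrum $Z(\Delta\omega) = S(\Delta\omega)/S_0$ via the magnetization transfer ratio asymmetry, a standard definition in the CEST literature (the paper's own closed-form expression, derived from the Bloch-McConnell eigenspace solution, is not reproduced here):

\[
\mathrm{MTR}_{\mathrm{asym}}(\Delta\omega) \;=\; \frac{S(-\Delta\omega) - S(+\Delta\omega)}{S_0}
\]

where $S$ is the saturated signal, $S_0$ the unsaturated signal, and $\Delta\omega$ the frequency offset of the saturation pulse relative to water.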
@article{jsdp_17_3_87,
  author    = {Sekhavat, Yoones and Namani, Mohammad Sadegh},
  title     = {Believable Visual Feedback in Motor Learning Using Occlusion-based Clipping in Video Mapping},
  abstract  = {Gait rehabilitation systems provide patients with guidance and feedback that help them perform rehabilitation tasks better. Real-time feedback can guide users to correct their movements, and research has shown that the quality of this feedback is crucial for motor learning in physical rehabilitation. Common virtual reality feedback systems present interactive feedback on a monitor in front of the user. In this technique, however, there is a gap between where the feedback is presented and where the actual movement occurs: the movement takes place, for example, on a treadmill, while the feedback appears on a screen in front of the user. Because the feedback is not provided at the location of the movement, users must perform additional cognitive processing to understand and apply it. This discrepancy is misleading and can make it difficult to adapt to changes in rehabilitation tasks. In addition, existing feedback systems do not handle the occlusion problem well, which misleads users into assuming an obstacle lies on the foot; avoiding this requires creating the illusion that the foot is on top of the obstacle. In this paper, we propose a visual feedback system based on video mapping to provide a better understanding of the relationship between body perception and movement kinematics. The system is based on Augmented Reality (AR), in which visual cues in the form of light are projected onto the treadmill by video projectors, and occlusion-based clipping is used to enhance the believability of the feedback. We argue that this system contributes to the correct execution of rehabilitation exercises by increasing patients' awareness of gait speed and step length. We designed and implemented two prototypes: video projection with occlusion-based clipping (OC) and a prototype without occlusion-based clipping (NOC). A set of experiments was performed to assess and compare the ability of unimpaired participants to detect real-time feedback and modify their gait using our feedback system. In particular, we asked 24 unimpaired participants to perform stepping and obstacle avoidance tasks; since the focus of the paper is the quality of the feedback rather than its long-term training effect, unimpaired participants were recruited. In the experiments, a motion capture device was used to measure participants' performance. We demonstrated that our system is effective in terms of steps to adapt to changes, obstacles to adapt to changes, normalized accumulative deviation, quality of user experience, and intuitiveness of feedback. The results showed that projection-based AR feedback can successfully guide participants through a rehabilitation exercise. In particular, there were statistically significant differences between the fault rates of participants using the OC and NOC prototypes in the stepping (p=0.0031) and obstacle avoidance (p=0.021) tasks, and participants rated OC as more intuitive than NOC in terms of feedback quality. Our feedback system significantly improved participants' ability to adapt to changes while walking on the treadmill.},
  keywords  = {Video mapping, real-time feedback, motor learning, augmented reality},
  volume    = {17},
  number    = {3},
  pages     = {87-100},
  publisher = {Research Center on Developing Advanced Technologies},
  doi       = {10.29252/jsdp.17.3.87},
  url       = {http://jsdp.rcisp.ac.ir/article-1-915-en.html},
  eprint    = {http://jsdp.rcisp.ac.ir/article-1-915-en.pdf},
  journal   = {Signal and Data Processing},
  issn      = {2538-4201},
  eissn     = {2538-421X},
  year      = {2020}
}
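A minimal sketch of occlusion-based clipping as a projector-side mask (assumptions: a binary foot silhouette is available, e.g., from the motion-capture system, and the feedback is a grayscale projector frame; the paper's rendering pipeline is not specified here):

import numpy as np

def clip_feedback(feedback: np.ndarray, foot_mask: np.ndarray) -> np.ndarray:
    """Black out projector pixels covered by the foot, so the projected cue
    appears to pass underneath the foot rather than being painted on it."""
    out = feedback.copy()
    out[foot_mask] = 0  # project nothing where the foot occludes the cue
    return out

frame = np.full((4, 4), 255, dtype=np.uint8)  # toy all-bright feedback frame
mask = np.zeros((4, 4), dtype=bool)
mask[1:3, 1:3] = True                          # toy foot silhouette
print(clip_feedback(frame, mask))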
@article{jsdp_17_3_101,
  author    = {Kazemitabar, Javad and Tavakkoli, Mitr},
  title     = {A Bayesian approach for image denoising in MRI},
  abstract  = {Magnetic Resonance Imaging (MRI) is a notable medical imaging technique based on Nuclear Magnetic Resonance (NMR). MRI is a safe imaging method with high contrast between soft tissues, which has made it the most popular imaging technique in clinical applications. The visual quality of MR images plays a vital role in medical diagnostics, but it can be severely corrupted by noise arising during the acquisition process; denoising these images is therefore of great importance in medical applications. Over the last decades, many MR denoising approaches have been proposed, which can be classified into two general groups: acquisition-based noise reduction and post-acquisition denoising methods. Approaches in the first group increase imaging time and lead to a time-consuming process. The issue with the second group is the complicated mathematical equations required for denoising, which usually call for stochastic algorithms to solve. This study aims to find an appropriate statistical post-acquisition MR denoising method based on the Bayesian technique. Finding an appropriate prior density function is also of great importance, since the performance of the Bayesian technique depends on its prior. In this study, the uniform distribution is applied as the prior density function; a uniform prior reduces the Bayesian algorithm to its simplest possible form and lowers computational complexity and run time. The proposed method solves the numerical problem in adequate time without complex algorithms and removes noise in less than 120 seconds on average in all cases. To quantitatively assess image improvement, we used the Structural Similarity Index Measure (SSIM) in MATLAB, which shows an average improvement of more than 0.1 across all images. Considering the results, it can be concluded that combining a uniform prior density function with the Bayesian algorithm can significantly reduce image noise without excessive time and computational cost.},
  keywords  = {Bayesian estimation, Rician distribution, Magnetic Resonance Imaging},
  volume    = {17},
  number    = {3},
  pages     = {101-108},
  publisher = {Research Center on Developing Advanced Technologies},
  doi       = {10.29252/jsdp.17.3.101},
  url       = {http://jsdp.rcisp.ac.ir/article-1-893-en.html},
  eprint    = {http://jsdp.rcisp.ac.ir/article-1-893-en.pdf},
  journal   = {Signal and Data Processing},
  issn      = {2538-4201},
  eissn     = {2538-421X},
  year      = {2020}
}
@article{jsdp_17_3_109,
  author    = {Ghofrani, Faegheh and Amini, Mortez},
  title     = {Privacy Preserving Dynamic Access Control Model with Access Delegation for eHealth},
  abstract  = {eHealth is the concept of using stored digital data to achieve clinical, educational, and administrative goals and to meet the needs of patients, experts, and medical care providers. The expanding use of information technology, and in particular the Internet of Things (IoT), in eHealth raises various challenges, the most important of which are security and access control. In this regard, different security requirements have been defined, such as a physician's access to the patient's electronic health record (EHR) based on the physician's physical location, detection of emergency conditions with dynamic granting of access to the attending physician or nurse, preservation of patients' privacy based on their preferences, and delegation of duties and the related permissions. Among the security and access control models presented in the literature, no model satisfies all of these requirements together. To fill this gap, this paper presents a privacy-preserving dynamic access control model with access delegation capability for eHealth, called TbDAC. The proposed model tackles the security challenges of these environments when physicians and nurses access patients' EHRs, and it includes the data structures, procedures, and mechanisms necessary for access delegation. The proposed model is in fact a family of models, named TbDAC, for access control in eHealth that reflects usual hospital procedures. In the core model (called TbDAC0), two primitive concepts, team and role, are employed for access control in hospitals: a set of permission types is assigned to each role, and a medical team (a set of hospital staff with their roles) is assigned to each patient, so a person's role in a team determines his or her permissions on the patient's health information. Since patients' vital information is collected from IoT sensors, the model incorporates dynamic access control through a set of dynamic, context-aware access rules. Detecting emergency conditions and granting proper permissions to the nearest physicians and nurses (using location information) is a key feature of this model. Since health information is among the most sensitive personal information, the core model is extended to a privacy-preserving access control model (named TbDAC1); to this end, the purpose of information use and the privacy preferences of the patients are considered in the access control enforcement procedure. Delegation of duties is a necessity in medical care, so access delegation capability is added to the core model in the third member of the family, named TbDAC2. The complete model, which considers all security requirements of these environments, including emergency conditions, privacy, and delegation, is the last member of the family, named TbDAC3. In each of the presented models, the therapeutic process carried out in hospitals, the relational model, and the entities used in the model are precisely and formally defined, along with the access control process and the dynamic access rules for different situations. The proposed model is evaluated using three approaches: comparison with the models proposed in related research, assessment of real-world scenarios in a case study, and the design and implementation of a prototype access control system based on the model for mobile Android devices. The evaluations show the considerable capability of the model in satisfying the security requirements in comparison with existing models, as well as its applicability in practice for both simple and complicated access scenarios.},
  keywords  = {eHealth, IoT, Dynamic Access Control, Privacy, Access Delegation},
  volume    = {17},
  number    = {3},
  pages     = {109-140},
  publisher = {Research Center on Developing Advanced Technologies},
  doi       = {10.29252/jsdp.17.3.109},
  url       = {http://jsdp.rcisp.ac.ir/article-1-916-en.html},
  eprint    = {http://jsdp.rcisp.ac.ir/article-1-916-en.pdf},
  journal   = {Signal and Data Processing},
  issn      = {2538-4201},
  eissn     = {2538-421X},
  year      = {2020}
}
@article{jsdp_17_3_141,
  author    = {Ahmadi, Morteza ali and Dianat, Rouhollah},
  title     = {Introducing a method for extracting features from facial images based on applying transformations to features obtained from convolutional neural networks},
  abstract  = {In pattern recognition, features denote measurable characteristics of an observed phenomenon, and feature extraction is the procedure of measuring these characteristics. A set of features can be expressed as a feature vector, which is used as the input data of a system. An efficient feature extraction method can improve the performance of a machine learning system, such as face recognition in the image domain. Most feature extraction methods for facial images fall into three categories: geometric feature extractors, linear transformation-based methods, and neural network-based methods. Geometric features include characteristics of the face such as the distance between the eyes, the height of the nose, and the width of the mouth. In the second category, a linear transformation is applied to the original data, mapping it to a new space called the feature space. In the third category, the last layer of the network, used for classification, is removed, and the output of the penultimate layer serves as the extracted features. Convolutional Neural Networks (CNNs) are among the most popular neural networks and are used for recognizing and verifying face images as well as for extracting features. The aim of this paper is to present a new feature extraction method; the idea behind it can be applied to any feature extraction problem. In the proposed method, the test feature vector is appended to the training feature vectors of each class. Afterward, a suitable transform is applied to the feature vectors of each class (including the appended test feature vector), and a specific part of the transformed data is considered. The transform type and the subsequent processing, such as the choice of which part of the transformed data to keep, are selected so that the feature vectors of the correct class are disturbed less than those of the other classes; in this regard, it is appropriate to use transformations that concentrate the energy at low frequencies. Two such transformations, Fourier and wavelet, are used in the proposed method. Intuitively, the proposed idea can improve the true positive (TP) rate. As a realization, we use the idea as a post-processing step in CNN-based face recognition, and the final features are used for identification. The experimental results show up to a 3.4% improvement on the LFW dataset.},
  keywords  = {Feature extraction, Convolutional neural networks, Wavelet transform, Fourier transform},
  volume    = {17},
  number    = {3},
  pages     = {141-156},
  publisher = {Research Center on Developing Advanced Technologies},
  doi       = {10.29252/jsdp.17.3.141},
  url       = {http://jsdp.rcisp.ac.ir/article-1-837-en.html},
  eprint    = {http://jsdp.rcisp.ac.ir/article-1-837-en.pdf},
  journal   = {Signal and Data Processing},
  issn      = {2538-4201},
  eissn     = {2538-421X},
  year      = {2020}
}
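One loose reading of the transform-based post-processing, with assumptions flagged: the test vector is stacked with a class's training vectors, a Fourier transform is applied across the stack, and disturbance is measured as the energy outside the lowest frequencies (the paper's exact transform axis, retained coefficients, and decision rule may differ):

import numpy as np

def class_disturbance(train_feats: np.ndarray, test_feat: np.ndarray, keep: int = 8) -> float:
    """Append the test vector to a class's training vectors, FFT across the
    stacked vectors, and return the energy outside the lowest `keep`
    frequencies; the idea is that a matching class is disturbed less."""
    stacked = np.vstack([train_feats, test_feat])
    spectrum = np.fft.fft(stacked, axis=0)
    return float(np.sum(np.abs(spectrum[keep:]) ** 2))

rng = np.random.default_rng(2)
cls = rng.random((16, 128))                       # toy CNN features for one class
print(class_disturbance(cls, cls.mean(axis=0)))   # in-class probe
print(class_disturbance(cls, rng.random(128)))    # out-of-class probe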
@article{jsdp_17_3_157,
  author    = {Mokhlessi, Omid and SeyedMahdaviChabok, Seyedjavad and Alirezaee, Ai},
  title     = {Selecting effective features from Phonocardiography by Genetic Algorithm based on Pearson's Coefficients Correlation},
  abstract  = {The heart is one of the most important organs of the body, responsible for pumping blood through the valvular system. Heart valve disorders are one of the leading causes of death worldwide. These disorders are complications that deform or damage the heart valves, and as a result the sounds produced by their opening and closing differ from those of a healthy heart. Owing to the complexity of cardiac audio signals and of their recording, designing a diagnosis system that is accurate, robust to noise, and fast is difficult. One of the most important issues in designing an intelligent heart disease diagnosis system is the use of appropriate primary data: the data must not only be recorded in accordance with the patient's equipment and clinical condition, but must also be labeled according to the physician's correct diagnosis. In this study, an intelligent system for diagnosing valvular heart disease from phonocardiographic sound signals is presented, aiming at maximum diagnostic power. For this purpose, the signals are labeled and used under the supervision of a specialist physician. The main goal is to select effective feature vectors using genetic optimization with an evaluation function based on Pearson correlation coefficients. Before the feature extraction step, preprocessing consisting of data recording, normalization, segmentation, and filtering was applied to increase the system's accuracy. In the feature extraction step, temporal, wavelet, and signal energy components are extracted from the prepared signal. Since the extracted problem space was not sufficiently correlated, principal component analysis, linear discriminant analysis, and uncorrelated linear discriminant analysis were then used to construct feature vectors in a final correlated space. In the selection step, an efficient and simple method is used to estimate the optimal number of features. In general, correlation is a criterion for determining the relationship between variables: the difference between the correlations of all feature subsets is calculated (for both in-class and out-of-class subsets) and then sorted in descending order according to the evaluation function. The evaluation function in the feature selection step is thus based on the Pearson statistic and is optimized by a genetic algorithm, with the aim of identifying more effective and better-correlated features for the final vectors. Finally, two widely used neural networks with static and dynamic structures, the perceptron and the Elman network, are used to evaluate the accuracy of the proposed vectors. The results of modeling the process of selecting effective features and diagnosing the disease show the efficiency of the proposed method.},
  keywords  = {phonocardiography, cardiac valvular disease, integration features, genetic optimization algorithm, Pearson correlation coefficients},
  volume    = {17},
  number    = {3},
  pages     = {157-176},
  publisher = {Research Center on Developing Advanced Technologies},
  doi       = {10.29252/jsdp.17.3.157},
  url       = {http://jsdp.rcisp.ac.ir/article-1-508-en.html},
  eprint    = {http://jsdp.rcisp.ac.ir/article-1-508-en.pdf},
  journal   = {Signal and Data Processing},
  issn      = {2538-4201},
  eissn     = {2538-421X},
  year      = {2020}
}
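A minimal sketch of a Pearson-based fitness function that a genetic algorithm could maximize over binary feature masks, rewarding label correlation and penalizing inter-feature redundancy (an assumed CFS-style criterion; the paper's evaluation function over in-class and out-of-class subsets differs in its details):

import numpy as np

def fitness(mask: np.ndarray, X: np.ndarray, y: np.ndarray) -> float:
    """Pearson-based score for a binary feature mask: mean |corr(feature, label)|
    discounted by the mean absolute correlation among the selected features."""
    idx = np.flatnonzero(mask)
    if idx.size == 0:
        return 0.0
    relevance = np.mean([abs(np.corrcoef(X[:, j], y)[0, 1]) for j in idx])
    if idx.size == 1:
        return float(relevance)
    inter = np.abs(np.corrcoef(X[:, idx], rowvar=False))
    redundancy = (inter.sum() - idx.size) / (idx.size * (idx.size - 1))
    return float(relevance / (1.0 + redundancy))

rng = np.random.default_rng(3)
X = rng.random((100, 12))                                  # toy feature matrix
y = (X[:, 0] + 0.1 * rng.random(100) > 0.5).astype(float)  # label tied to feature 0
print(fitness(np.array([1, 0, 1, 0, 0, 0, 0, 0, 0, 0, 0, 0]), X, y))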