
Source Code for Module mvpa.measures.anova

#emacs: -*- mode: python-mode; py-indent-offset: 4; indent-tabs-mode: nil -*-
#ex: set sts=4 ts=4 sw=4 et:
### ### ### ### ### ### ### ### ### ### ### ### ### ### ### ### ### ### ### ##
#
#   See COPYING file distributed along with the PyMVPA package for the
#   copyright and license terms.
#
### ### ### ### ### ### ### ### ### ### ### ### ### ### ### ### ### ### ### ##
"""FeaturewiseDatasetMeasure performing a univariate ANOVA."""

__docformat__ = 'restructuredtext'

import numpy as N

from mvpa.measures.base import FeaturewiseDatasetMeasure


class OneWayAnova(FeaturewiseDatasetMeasure):
    """`FeaturewiseDatasetMeasure` that performs a univariate ANOVA.

    F-scores are computed for each feature as the ratio of between-group
    to within-group variance. Groups are defined by samples with unique
    labels.

    No statistical testing is performed, but raw F-scores are returned as a
    sensitivity map. As usual, F-scores have a range of [0, inf) with greater
    values indicating higher sensitivity.
    """

    def __init__(self, **kwargs):
        """Nothing special to do here.
        """
        # init base classes first
        FeaturewiseDatasetMeasure.__init__(self, **kwargs)


    def _call(self, dataset):
        """Computes featurewise f-scores."""
        # group means
        means = []
        # within-group variances
        vars_ = []

        # split by groups -> [groups x [samples x features]]
        for ul in dataset.uniquelabels:
            ul_samples = dataset.samples[dataset.labels == ul]
            means.append(ul_samples.mean(axis=0))
            vars_.append(ul_samples.var(axis=0))

        # mean of within-group variances
        mvw = N.array(vars_).mean(axis=0)
        # variance of group means
        vgm = N.array(means).var(axis=0)

        # compute f-scores (in-place to save some cycles)
        # XXX may cause problems when there are features with no variance in
        # some groups. One could deal with them here and possibly assign a
        # zero f-score to throw them out, but at least theoretically zero
        # variance is possible. Another possibility could be to apply
        # N.nan_to_num(), but this might hide the problem.
        # Michael therefore thinks that it is best to let the user deal with
        # it prior to any analysis.
        vgm /= mvw

        return vgm
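
To illustrate what _call() computes, here is a minimal, self-contained NumPy sketch that reproduces the same featurewise score on a toy dataset. The samples and labels arrays are hypothetical and serve only as an example; within PyMVPA one would normally instantiate OneWayAnova() and apply it to a dataset object, which the measure's base class routes to _call().

import numpy as N

# hypothetical toy data: 6 samples x 4 features, two label groups
samples = N.array([[1.0, 2.0, 0.1, 5.0],
                   [1.2, 2.1, 0.2, 4.8],
                   [0.9, 1.9, 0.1, 5.1],
                   [3.0, 2.0, 0.9, 5.0],
                   [3.1, 2.2, 1.1, 4.9],
                   [2.9, 1.8, 1.0, 5.2]])
labels = N.array([0, 0, 0, 1, 1, 1])

means = []
vars_ = []
# split samples by label group, collect group means and variances
for ul in N.unique(labels):
    group = samples[labels == ul]
    means.append(group.mean(axis=0))
    vars_.append(group.var(axis=0))

# mean of within-group variances and variance of group means,
# combined into one score per feature as in OneWayAnova._call()
mvw = N.array(vars_).mean(axis=0)
vgm = N.array(means).var(axis=0)
fscores = vgm / mvw

# features that separate the two groups (1st and 3rd) score high,
# non-discriminating features (2nd and 4th) score low
print(fscores)

Note that, since no degrees-of-freedom normalization is applied, these raw scores differ from the textbook F-statistic by a constant factor for balanced group sizes, which leaves the ranking of features unchanged.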