/usr/share/pyshared/statsmodels/stats/descriptivestats.py is in python-statsmodels 0.4.2-1.2.
This file is owned by root:root, with mode 0o644.
The actual contents of the file can be viewed below.
import sys
import numpy as np
from scipy import stats
#from statsmodels.iolib.table import SimpleTable
from statsmodels.iolib.table import SimpleTable
def _kurtosis(a):
    '''wrapper for scipy.stats.kurtosis that returns nan instead of raising Error

    missing options
    '''
    try:
        res = stats.kurtosis(a)
    except ValueError:
        res = np.nan
    return res

def _skew(a):
    '''wrapper for scipy.stats.skew that returns nan instead of raising Error

    missing options
    '''
    try:
        res = stats.skew(a)
    except ValueError:
        res = np.nan
    return res
class Describe(object):
    '''
    Calculates descriptive statistics for data.

    Defaults to a basic set of statistics; "all" can be specified, or a list
    can be given.

    dataset : a structured array or a plain ndarray (Larry?), with
        observations in rows and variables in columns.
    '''
    def __init__(self, dataset):
        self.dataset = dataset

        #better if this is initially a list to define order, or use an ordered dict
        # First position is the function
        # Second position is the tuple/list of column names/numbers
        # Third is the results in order of the columns
        self.univariate = dict(
            obs = [len, None, None],
            mean = [np.mean, None, None],
            std = [np.std, None, None],
            min = [np.min, None, None],
            max = [np.max, None, None],
            ptp = [np.ptp, None, None],
            var = [np.var, None, None],
            mode_val = [self._mode_val, None, None],
            mode_bin = [self._mode_bin, None, None],
            median = [np.median, None, None],
            skew = [stats.skew, None, None],
            uss = [stats.ss, None, None],
            kurtosis = [stats.kurtosis, None, None],
            percentiles = [self._percentiles, None, None], #BUG: not single value
            #sign_test_M = [self.sign_test_m, None, None],
            #sign_test_P = [self.sign_test_p, None, None]
            )
        #TODO: Basic stats for strings
        #self.strings = dict(
        #    unique = [np.unique, None, None],
        #    number_uniq = [len(
        #    most = [
        #    least = [

        #TODO: Multivariate
        #self.multivariate = dict(
        #    corrcoef(x[, y, rowvar, bias]),
        #    cov(m[, y, rowvar, bias]),
        #    histogram2d(x, y[, bins, range, normed, weights])
        #    )
        self._arraytype = None
        self._columns_list = None
    def _percentiles(self, x):
        p = [stats.scoreatpercentile(x, per) for per in
             (1, 5, 10, 25, 50, 75, 90, 95, 99)]
        return p

    def _mode_val(self, x):
        return stats.mode(x)[0][0]

    def _mode_bin(self, x):
        return stats.mode(x)[1][0]
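
    # Illustration of the two mode helpers above (assumed scipy behavior):
    # stats.mode returns a pair of arrays (modal values, counts), so for
    # x = np.array([1, 2, 2])
    #     stats.mode(x)[0][0]  ->  2.0   (the modal value, _mode_val)
    #     stats.mode(x)[1][0]  ->  2.0   (how often it occurs, _mode_bin)
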
    def _array_typer(self):
        """if not a structured array"""
        if not(self.dataset.dtype.names):
            """homogeneous dtype array"""
            self._arraytype = 'homog'
        elif self.dataset.dtype.names:
            """structured or rec array"""
            self._arraytype = 'sctruct'
        else:
            assert self._arraytype == 'sctruct' or self._arraytype == 'homog'
    def _is_dtype_like(self, col):
        """
        Check whether self.dataset[col][0] behaves like a string or a number;
        the dtype itself is unknown.
        See `numpy.lib._iotools._is_string_like`.
        """
        def string_like():
            #TODO: not sure what the result is if the first item is some type
            #      of missing value
            try:
                self.dataset[col][0] + ''
            except (TypeError, ValueError):
                return False
            return True

        def number_like():
            try:
                self.dataset[col][0] + 1.0
            except (TypeError, ValueError):
                return False
            return True

        if number_like() == True and string_like() == False:
            return 'number'
        elif number_like() == False and string_like() == True:
            return 'string'
        else:
            assert (number_like() == True or string_like() == True), \
                'Not sure of dtype ' + str(self.dataset[col][0])
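
    # Illustration of the duck-typing checks above (a sketch, not exhaustive):
    # 'a' + '' succeeds while 'a' + 1.0 raises TypeError, so a string column is
    # classified as 'string'; a float or int column behaves the other way round
    # and is classified as 'number'.  A column that supports neither operation
    # trips the assert.
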
    #@property
    def summary(self, stats='basic', columns='all', orientation='auto'):
        """
        Prints a table of summary statistics and stores the stats.

        stats : the desired statistics; a list, 'basic' or 'all' are options
            'basic' = ('obs', 'mean', 'std', 'min', 'max')
            'all' = ('obs', 'mean', 'std', 'min', 'max', 'ptp', 'var', 'mode',
                     'median', 'skew', 'uss', 'kurtosis', 'percentiles')
        columns : the columns/variables to report the statistics for,
            default is 'all'
            structured array: specify the column names, e.g.
                summary(stats='basic', columns=['alpha', 'beta'])
            standard array: specify column numbers (NEED TO TEST)

        percentiles currently broken
        mode requires mode_val and mode_bin separately
        """
        if self._arraytype == None:
            self._array_typer()

        if stats == 'basic':
            stats = ('obs', 'mean', 'std', 'min', 'max')
        elif stats == 'all':
            #stats = self.univariate.keys()
            #dict doesn't keep an order, use full list instead
            stats = ['obs', 'mean', 'std', 'min', 'max', 'ptp', 'var',
                     'mode_val', 'mode_bin', 'median', 'uss', 'skew',
                     'kurtosis', 'percentiles']
        else:
            for astat in stats:
                pass
                #assert astat in self.univariate

        #hack around percentiles multiple output
        #bad naming
        import scipy.stats
        #BUG: the following has all per the same per=99
##        perdict = dict(('perc_%2d'%per,
##                        [lambda x: scipy.stats.scoreatpercentile(x, per),
##                         None, None])
##                       for per in (1,5,10,25,50,75,90,95,99))
        def _fun(per):
            return lambda x: scipy.stats.scoreatpercentile(x, per)

        perdict = dict(('perc_%02d'%per, [_fun(per), None, None])
                       for per in (1,5,10,25,50,75,90,95,99))
        if 'percentiles' in stats:
            self.univariate.update(perdict)
            idx = stats.index('percentiles')
            stats[idx:idx+1] = sorted(perdict.keys())
        #JP: this doesn't allow a change in sequence, sequence in stats is ignored
        #this is just an if condition
        if any([aitem[1] for aitem in self.univariate.items() if aitem[0] in stats]):
            if columns == 'all':
                self._columns_list = []
                if self._arraytype == 'sctruct':
                    self._columns_list = self.dataset.dtype.names
                    #self._columns_list = [col for col in self.dataset.dtype.names if
                    #                      (self._is_dtype_like(col)=='number')]
                else:
                    self._columns_list = range(self.dataset.shape[1])
            else:
                self._columns_list = columns
                if self._arraytype == 'sctruct':
                    for col in self._columns_list:
                        assert (col in self.dataset.dtype.names)
                else:
                    assert self._is_dtype_like(self.dataset) == 'number'
                    columstypes = self.dataset.dtype
                    #TODO: do we need to make sure the dtype is float64 ?

            for astat in stats:
                calc = self.univariate[astat]
                if self._arraytype == 'sctruct':
                    calc[1] = self._columns_list
                    calc[2] = [calc[0](self.dataset[col]) for col in
                               self._columns_list if (self._is_dtype_like(col) ==
                               'number')]
                    #calc[2].append([len(np.unique(self.dataset[col])) for col
                    #                in self._columns_list if
                    #                self._is_dtype_like(col)=='string']
                else:
                    calc[1] = ['Col '+str(col) for col in self._columns_list]
                    calc[2] = [calc[0](self.dataset[:,col]) for col in
                               self._columns_list]
            return self.print_summary(stats, orientation=orientation)
        else:
            return self.print_summary(stats, orientation=orientation)
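
    # Typical calls (a sketch mirroring the docstring and test_basic_1a below):
    # on a structured array, columns are selected by name, e.g.
    #     Describe(data1).summary(stats='basic', columns=['alpha'])
    # For a plain 2d ndarray the docstring suggests column numbers instead,
    # but that path is still marked NEED TO TEST.
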
    def print_summary(self, stats, orientation='auto'):
        #TODO: need to specify a table formatting for the numbers, using default
        title = 'Summary Statistics'
        header = stats
        stubs = self.univariate['obs'][1]
        data = [[self.univariate[astat][2][col] for astat in stats] for col in
                range(len(self.univariate['obs'][2]))]

        if (orientation == 'varcols') or \
           (orientation == 'auto' and len(stubs) < len(header)):
            #swap rows and columns
            data = map(lambda *row: list(row), *data)
            header, stubs = stubs, header

        part_fmt = dict(data_fmts = ["%#8.4g"]*(len(header)-1))
        table = SimpleTable(data,
                            header,
                            stubs,
                            title=title,
                            txt_fmt=part_fmt)
        return table
def sign_test(samp, mu0=0):
    '''
    Sign test with mu0=0 by default (though
    the median is often used in practice)

    Parameters
    ----------
    samp : ndarray
        1d array of sample observations.
    mu0 : float
        Value to test the sample against; 0 by default.

    Returns
    -------
    M, p-value

    where

    M = (N(+) - N(-))/2, N(+) is the number of values above mu0,
    N(-) is the number of values below.  Values equal to mu0
    are discarded.

    The p-value for M is calculated using the binomial distribution
    and can be interpreted the same as for a t-test.

    See Also
    --------
    scipy.stats.wilcoxon
    '''
    pos = np.sum(samp > mu0)
    neg = np.sum(samp < mu0)
    M = (pos - neg)/2.
    p = stats.binom_test(min(pos, neg), pos + neg, .5)
    return M, p
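
# Example of the interface (a sketch; the numbers follow from the definition
# above): for samp = np.array([1, 2, 3, -1, 4]) and mu0=0 there are 4 values
# above and 1 below mu0, so M = (4 - 1)/2. = 1.5 and the two-sided binomial
# p-value is stats.binom_test(1, 5, 0.5), about 0.375:
#     M, p = sign_test(np.array([1, 2, 3, -1, 4]))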
#TODO: There must be a better way, but formatting the stats of a function that
#      returns 2 values is a problem.
#def sign_test_m(samp,mu0=0):
#    return self.sign_test(samp,mu0)[0]
#def sign_test_p(samp,mu0=0):
#    return self.sign_test(samp,mu0)[1]
########################################
########################################
import unittest
data1 = np.array([(1, 2, 'a', 'aa'),
                  (2, 3, 'b', 'bb'),
                  (2, 4, 'b', 'cc')],
                 dtype=[('alpha', float), ('beta', int),
                        ('gamma', '|S1'), ('delta', '|S2')])
data2 = np.array([(1, 2),
                  (2, 3),
                  (2, 4)],
                 dtype=[('alpha', float), ('beta', float)])
data3 = np.array([[1, 2, 4, 4],
                  [2, 3, 3, 3],
                  [2, 4, 4, 3]], dtype=float)
data4 = np.array([[1, 2, 3, 4, 5, 6],
                  [6, 5, 4, 3, 2, 1],
                  [9, 9, 9, 9, 9, 9]])
class TestSimpleTable(unittest.TestCase):
    #from statsmodels.iolib.table import SimpleTable, default_txt_fmt

    def test_basic_1(self):
        print('test_basic_1')
        t1 = Describe(data1)
        print(t1.summary())

    def test_basic_2(self):
        print('test_basic_2')
        t2 = Describe(data2)
        print(t2.summary())

    def test_basic_3(self):
        print('test_basic_3')
        t1 = Describe(data3)
        print(t1.summary())

    def test_basic_4(self):
        print('test_basic_4')
        t1 = Describe(data4)
        print(t1.summary())

    def test_basic_1a(self):
        print('test_basic_1a')
        t1 = Describe(data1)
        print(t1.summary(stats='basic', columns=['alpha']))

    def test_basic_1b(self):
        print('test_basic_1b')
        t1 = Describe(data1)
        print(t1.summary(stats='basic', columns='all'))

    def test_basic_2a(self):
        print('test_basic_2a')
        t2 = Describe(data2)
        print(t2.summary(stats='all'))
    def test_basic_3a(self):
        t1 = Describe(data3)
        print(t1.summary(stats='all'))

    def test_basic_4a(self):
        t1 = Describe(data4)
        print(t1.summary(stats='all'))
if __name__ == "__main__":
    #unittest.main()
    t1 = Describe(data4)
    #print(t1.summary(stats='all'))
    noperc = ['obs', 'mean', 'std', 'min', 'max', 'ptp', #'mode', #'var',
              'median', 'skew', 'uss', 'kurtosis']
    #TODO: mode, var raise an exception
    #TODO: percentiles write a list into each cell (?), huge wide format
    print(t1.summary(stats=noperc))
    print(t1.summary())
    print(t1.summary(orientation='varcols'))
    print(t1.summary(stats=['mean', 'median', 'min', 'max'],
                     orientation='varcols'))
    print(t1.summary(stats='all'))
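
    # A couple of further illustrative calls, sketched along the lines of the
    # unit tests above (data2 and sign_test are defined in this module; the
    # mu0 value below is only an example):
    M, p = sign_test(data2['alpha'], mu0=2)
    print('sign test on data2["alpha"] against mu0=2: M=%s, p=%s' % (M, p))

    t2 = Describe(data2)
    print(t2.summary(stats='basic', columns=['alpha']))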