Package pylal :: Module bayespputils
[hide private]
[frames] | [no frames]

Source Code for Module pylal.bayespputils

   1  # -*- coding: utf-8 -*- 
   2  # 
   3  #       bayespputils.py 
   4  # 
   5  #       Copyright 2010 
   6  #       Benjamin Aylott <benjamin.aylott@ligo.org>, 
   7  #       Benjamin Farr <bfarr@u.northwestern.edu>, 
   8  #       Will M. Farr <will.farr@ligo.org>, 
   9  #       John Veitch <john.veitch@ligo.org>, 
  10  #       Salvatore Vitale <salvatore.vitale@ligo.org>, 
  11  #       Vivien Raymond <vivien.raymond@ligo.org> 
  12  # 
  13  #       This program is free software; you can redistribute it and/or modify 
  14  #       it under the terms of the GNU General Public License as published by 
  15  #       the Free Software Foundation; either version 2 of the License, or 
  16  #       (at your option) any later version. 
  17  # 
  18  #       This program is distributed in the hope that it will be useful, 
  19  #       but WITHOUT ANY WARRANTY; without even the implied warranty of 
  20  #       MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the 
  21  #       GNU General Public License for more details. 
  22  # 
  23  #       You should have received a copy of the GNU General Public License 
  24  #       along with this program; if not, write to the Free Software 
  25  #       Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, 
  26  #       MA 02110-1301, USA. 
  27   
  28  #=============================================================================== 
  29  # Preamble 
  30  #=============================================================================== 
  31   
  32  """ 
  33  This module contains classes and functions for post-processing the output 
  34  of the Bayesian parameter estimation codes. 
  35  """ 
  36   
  37  #standard library imports 
  38  import os 
  39  import sys 
  40  from math import cos,ceil,floor,sqrt,pi as pi_constant 
  41  import xml 
  42  from xml.dom import minidom 
  43  from operator import itemgetter 
  44   
  45  #related third party imports 
  46  from lalinference.io import read_samples 
  47  import healpy as hp 
  48  import astropy.table 
  49  import lalinference.plot.cmap 
  50  import numpy as np 
  51  from numpy import fmod 
  52  import matplotlib 
  53  from matplotlib import pyplot as plt,cm as mpl_cm,lines as mpl_lines 
  54  from scipy import stats 
  55  from scipy import special 
  56  from scipy import signal 
  57  from scipy.optimize import newton 
  58  from scipy import interpolate 
  59  from numpy import linspace 
  60  import random 
  61  import socket 
  62  from itertools import combinations 
  63  from lalinference import LALInferenceHDF5PosteriorSamplesDatasetName as posterior_grp_name 
  64  import re 
  65   
  66  try: 
  67      import lalsimulation as lalsim 
  68  except ImportError: 
  69      print('Cannot import lalsimulation SWIG bindings') 
  70      raise 
  71   
  72  try: 
  73      from lalinference.imrtgr.nrutils import bbh_final_mass_non_spinning_Panetal, bbh_final_spin_non_spinning_Panetal, bbh_final_spin_non_precessing_Healyetal, bbh_final_mass_non_precessing_Healyetal, bbh_final_spin_projected_spin_Healyetal, bbh_final_mass_projected_spin_Healyetal, bbh_aligned_Lpeak_6mode_SHXJDK 
  74  except ImportError: 
  75      print('Cannot import lalinference.imrtgr.nrutils. Will suppress final parameter calculations.') 
  76   
  77  from matplotlib.ticker import FormatStrFormatter,ScalarFormatter,AutoMinorLocator 
  78   
  79  try: 
  80    hostname_short=socket.gethostbyaddr(socket.gethostname())[0].split('.',1)[1] 
  81  except: 
  82    hostname_short='Unknown' 
  83  if hostname_short=='ligo.caltech.edu' or hostname_short=='cluster.ldas.cit': #The CIT cluster has troubles with the default 'cm' font. 'custom' has the least troubles, but does not include \odot 
  84    matplotlib.rcParams.update( 
  85                               {'mathtext.fontset' : "custom", 
  86                               'mathtext.fallback_to_cm' : True 
  87                               }) 
  88   
  89  from xml.etree.cElementTree import Element, SubElement, ElementTree, Comment, tostring, XMLParser 
  90   
  91  #local application/library specific imports 
  92  import pylal 
  93  import lal 
  94  from glue.ligolw import lsctables 
  95  from glue.ligolw import utils 
  96  from pylal import git_version 
  97  #C extensions 
  98  from _bayespputils import _skyhist_cart,_burnin 
  99   
 100  __author__="Ben Aylott <benjamin.aylott@ligo.org>, Ben Farr <bfarr@u.northwestern.edu>, Will M. Farr <will.farr@ligo.org>, John Veitch <john.veitch@ligo.org>, Vivien Raymond <vivien.raymond@ligo.org>" 
 101  __version__= "git id %s"%git_version.id 
 102  __date__= git_version.date 
103 104 -def replace_column(table, old, new):
105 """Workaround for missing astropy.table.Table.replace_column method, 106 which was added in Astropy 1.1. 107 108 FIXME: remove this function when LALSuite depends on Astropy >= 1.1.""" 109 index = table.colnames.index(old) 110 table.remove_column(old) 111 table.add_column(astropy.table.Column(new, name=old), index=index)
112
113 -def as_array(table):
114 """Workaround for missing astropy.table.Table.as_array method, 115 which was added in Astropy 1.0. 116 117 FIXME: remove this function when LALSuite depends on Astropy >= 1.0.""" 118 try: 119 return table.as_array() 120 except: 121 return table._data
122 123 #=============================================================================== 124 # Constants 125 #=============================================================================== 126 #Parameters which are not to be exponentiated when found 127 logParams=['logl','loglh1','loglh2','logll1','loglv1','deltalogl','deltaloglh1','deltalogll1','deltaloglv1','logw','logprior','logpost','nulllogl','chain_log_evidence','chain_delta_log_evidence','chain_log_noise_evidence','chain_log_bayes_factor'] 128 #Parameters known to cbcBPP 129 relativePhaseParams=[ a+b+'_relative_phase' for a,b in combinations(['h1','l1','v1'],2)] 130 snrParams=['snr','optimal_snr','matched_filter_snr'] + ['%s_optimal_snr'%(i) for i in ['h1','l1','v1']] + ['%s_cplx_snr_amp'%(i) for i in ['h1','l1','v1']] + ['%s_cplx_snr_arg'%(i) for i in ['h1', 'l1', 'v1']] + relativePhaseParams 131 calAmpParams=['calamp_%s'%(ifo) for ifo in ['h1','l1','v1']] 132 calPhaseParams=['calpha_%s'%(ifo) for ifo in ['h1','l1','v1']] 133 calParams = calAmpParams + calPhaseParams 134 # Masses 135 massParams=['m1','m2','chirpmass','mchirp','mc','eta','q','massratio','asym_massratio','mtotal','mf'] 136 #Spins 137 spinParamsPrec=['a1','a2','phi1','theta1','phi2','theta2','costilt1','costilt2','costheta_jn','cosbeta','tilt1','tilt2','phi_jl','theta_jn','phi12','af'] 138 spinParamsAli=['spin1','spin2','a1z','a2z'] 139 spinParamsEff=['chi','effectivespin','chi_eff','chi_tot','chi_p'] 140 spinParams=spinParamsPrec+spinParamsEff+spinParamsAli 141 # Source frame params 142 cosmoParam=['m1_source','m2_source','mtotal_source','mc_source','redshift','mf_source'] 143 #Strong Field 144 ppEParams=['ppEalpha','ppElowera','ppEupperA','ppEbeta','ppElowerb','ppEupperB','alphaPPE','aPPE','betaPPE','bPPE'] 145 tigerParams=['dchi%i'%(i) for i in range(8)] + ['dchi%il'%(i) for i in [5,6] ] + ['dxi%d'%(i+1) for i in range(6)] + ['dalpha%i'%(i+1) for i in range(5)] + ['dbeta%i'%(i+1) for i in range(3)] + ['dsigma%i'%(i+1) for i in range(4)] 
146 bransDickeParams=['omegaBD','ScalarCharge1','ScalarCharge2'] 147 massiveGravitonParams=['lambdaG'] 148 tidalParams=['lambda1','lambda2','lam_tilde','dlam_tilde','lambdat','dlambdat'] 149 energyParams=['e_rad', 'l_peak'] 150 strongFieldParams=ppEParams+tigerParams+bransDickeParams+massiveGravitonParams+tidalParams+energyParams 151 152 #Extrinsic 153 distParams=['distance','distMPC','dist'] 154 incParams=['iota','inclination','cosiota'] 155 polParams=['psi','polarisation','polarization'] 156 skyParams=['ra','rightascension','declination','dec'] 157 phaseParams=['phase', 'phi0','phase_maxl'] 158 #Times 159 timeParams=['time','time_mean'] 160 endTimeParams=['l1_end_time','h1_end_time','v1_end_time'] 161 #others 162 statsParams=['logprior','logl','deltalogl','deltaloglh1','deltalogll1','deltaloglv1','deltaloglh2','deltaloglg1'] 163 calibParams=['calpha_l1','calpha_h1','calpha_v1','calamp_l1','calamp_h1','calamp_v1'] 164 165 ## Greedy bin sizes for cbcBPP and confidence leves used for the greedy bin intervals 166 confidenceLevels=[0.67,0.9,0.95,0.99] 167 168 
greedyBinSizes={'mc':0.025,'m1':0.1,'m2':0.1,'mass1':0.1,'mass2':0.1,'mtotal':0.1,'mc_source':0.025,'m1_source':0.1,'m2_source':0.1,'mtotal_source':0.1,'eta':0.001,'q':0.01,'asym_massratio':0.01,'iota':0.01,'cosiota':0.02,'time':1e-4,'time_mean':1e-4,'distance':1.0,'dist':1.0,'redshift':0.01,'mchirp':0.025,'chirpmass':0.025,'spin1':0.04,'spin2':0.04,'a1z':0.04,'a2z':0.04,'a1':0.02,'a2':0.02,'phi1':0.05,'phi2':0.05,'theta1':0.05,'theta2':0.05,'ra':0.05,'dec':0.05,'chi':0.05,'chi_eff':0.05,'chi_tot':0.05,'chi_p':0.05,'costilt1':0.02,'costilt2':0.02,'thatas':0.05,'costheta_jn':0.02,'beta':0.05,'omega':0.05,'cosbeta':0.02,'ppealpha':1.0,'ppebeta':1.0,'ppelowera':0.01,'ppelowerb':0.01,'ppeuppera':0.01,'ppeupperb':0.01,'polarisation':0.04,'rightascension':0.05,'declination':0.05,'massratio':0.001,'inclination':0.01,'phase':0.05,'tilt1':0.05,'tilt2':0.05,'phi_jl':0.05,'theta_jn':0.05,'phi12':0.05,'flow':1.0,'phase_maxl':0.05,'calamp_l1':0.01,'calamp_h1':0.01,'calamp_v1':0.01,'calpha_h1':0.01,'calpha_l1':0.01,'calpha_v1':0.01,'logdistance':0.1,'psi':0.1,'costheta_jn':0.1,'mf':0.1,'mf_source':0.1,'af':0.02,'e_rad':0.1,'l_peak':0.1} 169 for s in snrParams: 170 greedyBinSizes[s]=0.05 171 for derived_time in ['h1_end_time','l1_end_time','v1_end_time','h1l1_delay','l1v1_delay','h1v1_delay']: 172 greedyBinSizes[derived_time]=greedyBinSizes['time'] 173 for derived_phase in relativePhaseParams: 174 greedyBinSizes[derived_phase]=0.05 175 for param in tigerParams + bransDickeParams + massiveGravitonParams: 176 greedyBinSizes[param]=0.01 177 for param in tidalParams: 178 greedyBinSizes[param]=2.5 179 #Confidence levels 180 for loglname in statsParams: 181 greedyBinSizes[loglname]=0.1 182 183 #Pre-defined ordered list of line styles for use in matplotlib contour plots. 184 __default_line_styles=['solid', 'dashed', 'dashdot', 'dotted'] 185 #Pre-defined ordered list of matplotlib colours for use in plots. 
186 __default_color_lst=['r','b','y','g','c','m'] 187 #A default css string for use in html results pages. 188 __default_css_string=""" 189 p,h1,h2,h3,h4,h5 190 { 191 font-family:"Trebuchet MS", Arial, Helvetica, sans-serif; 192 } 193 194 p 195 { 196 font-size:14px; 197 } 198 199 h1 200 { 201 font-size:20px; 202 } 203 204 h2 205 { 206 font-size:18px; 207 } 208 209 h3 210 { 211 font-size:16px; 212 } 213 214 215 216 table 217 { 218 font-family:"Trebuchet MS", Arial, Helvetica, sans-serif; 219 width:100%; 220 border-collapse:collapse; 221 } 222 td,th 223 { 224 font-size:12px; 225 border:1px solid #B5C1CF; 226 padding:3px 7px 2px 7px; 227 } 228 th 229 { 230 font-size:14px; 231 text-align:left; 232 padding-top:5px; 233 padding-bottom:4px; 234 background-color:#B3CEEF; 235 color:#ffffff; 236 } 237 #postable tr:hover 238 { 239 background: #DFF4FF; 240 } 241 #covtable tr:hover 242 { 243 background: #DFF4FF; 244 } 245 #statstable tr:hover 246 { 247 background: #DFF4FF; 248 } 249 250 img { 251 max-width: 510px; 252 max-height: 510px; 253 width:100%; 254 eight:100%; 255 } 256 257 .ppsection 258 { 259 border-bottom-style:double; 260 } 261 262 """ 263 __default_javascript_string=''' 264 //<![CDATA[ 265 function toggle_visibility(tbid,lnkid) 266 { 267 268 if(document.all){document.getElementById(tbid).style.display = document.getElementById(tbid).style.display == 'block' ? 'none' : 'block';} 269 270 else{document.getElementById(tbid).style.display = document.getElementById(tbid).style.display == 'table' ? 'none' : 'table';} 271 272 document.getElementById(lnkid).value = document.getElementById(lnkid).value == '[-] Collapse' ? '[+] Expand' : '[-] Collapse'; 273 274 } 275 //]]> 276 277 ''' 278 279 #import sim inspiral table content handler 280 from pylal.SimInspiralUtils import ExtractSimInspiralTableLIGOLWContentHandler 281 from glue.ligolw import lsctables 282 lsctables.use_in(ExtractSimInspiralTableLIGOLWContentHandler)
283 284 #=============================================================================== 285 # Function to return the correct prior distribution for selected parameters 286 #=============================================================================== 287 -def get_prior(name):
288 distributions={ 289 'm1':'uniform', 290 'm2':'uniform', 291 'mc':None, 292 'eta':None, 293 'q':None, 294 'mtotal':'uniform', 295 'm1_source':None, 296 'm2_source':None, 297 'mtotal_source':None, 298 'mc_source':None, 299 'redshift':None, 300 'mf':None, 301 'mf_source':None, 302 'af':None, 303 'e_rad':None, 304 'l_peak':None, 305 'spin1':'uniform', 306 'spin2':'uniform', 307 'a1':'uniform', 308 'a2':'uniform', 309 'a1z':'uniform', 310 'a2z':'uniform', 311 'theta1':'uniform', 312 'theta2':'uniform', 313 'phi1':'uniform', 314 'phi2':'uniform', 315 'chi_eff':None, 316 'chi_tot':None, 317 'chi_p':None, 318 'tilt1':None, 319 'tilt2':None, 320 'costilt1':'uniform', 321 'costilt2':'uniform', 322 'iota':'np.cos', 323 'cosiota':'uniform', 324 'time':'uniform', 325 'time_mean':'uniform', 326 'dist':'x**2', 327 'ra':'uniform', 328 'dec':'np.cos', 329 'phase':'uniform', 330 'psi':'uniform', 331 'theta_jn':'np.sin', 332 'costheta_jn':'uniform', 333 'beta':None, 334 'cosbeta':None, 335 'phi_jl':None, 336 'phi12':None, 337 'logl':None, 338 'h1_end_time':None, 339 'l1_end_time':None, 340 'v1_end_time':None, 341 'h1l1_delay':None, 342 'h1v1_delay':None, 343 'l1v1_delay':None, 344 'lambdat' :None, 345 'dlambdat':None, 346 'lambda1' : 'uniform', 347 'lambda2': 'uniform', 348 'lam_tilde' : None, 349 'dlam_tilde': None, 350 'calamp_h1' : 'uniform', 351 'calamp_l1' : 'uniform', 352 'calpha_h1' : 'uniform', 353 'calpha_l1' : 'uniform', 354 'polar_eccentricity':'uniform', 355 'polar_angle':'uniform', 356 'alpha':'uniform' 357 } 358 try: 359 return distributions(name) 360 except: 361 return None
362
363 #=============================================================================== 364 # Function used to generate plot labels. 365 #=============================================================================== 366 -def plot_label(param):
367 """ 368 A lookup table for plot labels. 369 """ 370 m1_names = ['mass1', 'm1'] 371 m2_names = ['mass2', 'm2'] 372 mc_names = ['mc','mchirp','chirpmass'] 373 eta_names = ['eta','massratio','sym_massratio'] 374 q_names = ['q','asym_massratio'] 375 iota_names = ['iota','incl','inclination'] 376 dist_names = ['dist','distance'] 377 ra_names = ['rightascension','ra'] 378 dec_names = ['declination','dec'] 379 phase_names = ['phi_orb', 'phi', 'phase', 'phi0'] 380 gr_test_names = ['dchi%d'%i for i in range(8)]+['dchil%d'%i for i in [5,6]]+['dxi%d'%(i+1) for i in range(6)]+['dalpha%d'%(i+1) for i in range(5)]+['dbeta%d'%(i+1) for i in range(3)]+['dsigma%d'%(i+1) for i in range(4)] 381 382 labels={ 383 'm1':r'$m_1\,(\mathrm{M}_\odot)$', 384 'm2':r'$m_2\,(\mathrm{M}_\odot)$', 385 'mc':r'$\mathcal{M}\,(\mathrm{M}_\odot)$', 386 'eta':r'$\eta$', 387 'q':r'$q$', 388 'mtotal':r'$M_\mathrm{total}\,(\mathrm{M}_\odot)$', 389 'm1_source':r'$m_{1}^\mathrm{source}\,(\mathrm{M}_\odot)$', 390 'm2_source':r'$m_{2}^\mathrm{source}\,(\mathrm{M}_\odot)$', 391 'mtotal_source':r'$M_\mathrm{total}^\mathrm{source}\,(\mathrm{M}_\odot)$', 392 'mc_source':r'$\mathcal{M}^\mathrm{source}\,(\mathrm{M}_\odot)$', 393 'redshift':r'$z$', 394 'mf':r'$M_\mathrm{final}\,(\mathrm{M}_\odot)$', 395 'mf_source':r'$M_\mathrm{final}^\mathrm{source}\,(\mathrm{M}_\odot)$', 396 'af':r'$a_\mathrm{final}$', 397 'e_rad':r'$E_\mathrm{rad}\,(\mathrm{M}_\odot)$', 398 'l_peak':r'$L_\mathrm{peak}\,(10^{56}\,\mathrm{ergs}\,\mathrm{s}^{-1})$', 399 'spin1':r'$S_1$', 400 'spin2':r'$S_2$', 401 'a1':r'$a_1$', 402 'a2':r'$a_2$', 403 'a1z':r'$a_{1z}$', 404 'a2z':r'$a_{2z}$', 405 'theta1':r'$\theta_1\,(\mathrm{rad})$', 406 'theta2':r'$\theta_2\,(\mathrm{rad})$', 407 'phi1':r'$\phi_1\,(\mathrm{rad})$', 408 'phi2':r'$\phi_2\,(\mathrm{rad})$', 409 'chi_eff':r'$\chi_\mathrm{eff}$', 410 'chi_tot':r'$\chi_\mathrm{total}$', 411 'chi_p':r'$\chi_\mathrm{P}$', 412 'tilt1':r'$t_1\,(\mathrm{rad})$', 413 'tilt2':r'$t_2\,(\mathrm{rad})$', 414 
'costilt1':r'$\mathrm{cos}(t_1)$', 415 'costilt2':r'$\mathrm{cos}(t_2)$', 416 'iota':r'$\iota\,(\mathrm{rad})$', 417 'cosiota':r'$\mathrm{cos}(\iota)$', 418 'time':r'$t_\mathrm{c}\,(\mathrm{s})$', 419 'time_mean':r'$<t>\,(\mathrm{s})$', 420 'dist':r'$d_\mathrm{L}\,(\mathrm{Mpc})$', 421 'ra':r'$\alpha$', 422 'dec':r'$\delta$', 423 'phase':r'$\phi\,(\mathrm{rad})$', 424 'psi':r'$\psi\,(\mathrm{rad})$', 425 'theta_jn':r'$\theta_\mathrm{JN}\,(\mathrm{rad})$', 426 'costheta_jn':r'$\mathrm{cos}(\theta_\mathrm{JN})$', 427 'beta':r'$\beta\,(\mathrm{rad})$', 428 'cosbeta':r'$\mathrm{cos}(\beta)$', 429 'phi_jl':r'$\phi_\mathrm{JL}\,(\mathrm{rad})$', 430 'phi12':r'$\phi_\mathrm{12}\,(\mathrm{rad})$', 431 'logl':r'$\mathrm{log}(\mathcal{L})$', 432 'h1_end_time':r'$t_\mathrm{H}$', 433 'l1_end_time':r'$t_\mathrm{L}$', 434 'v1_end_time':r'$t_\mathrm{V}$', 435 'h1l1_delay':r'$\Delta t_\mathrm{HL}$', 436 'h1v1_delay':r'$\Delta t_\mathrm{HV}$', 437 'l1v1_delay':r'$\Delta t_\mathrm{LV}$', 438 'lambdat' : r'$\tilde{\Lambda}$', 439 'dlambdat': r'$\delta \tilde{\Lambda}$', 440 'lambda1' : r'$\lambda_1$', 441 'lambda2': r'$\lambda_2$', 442 'lam_tilde' : r'$\tilde{\Lambda}$', 443 'dlam_tilde': r'$\delta \tilde{\Lambda}$', 444 'calamp_h1' : r'$\delta A_{H1}$', 445 'calamp_l1' : r'$\delta A_{L1}$', 446 'calpha_h1' : r'$\delta \phi_{H1}$', 447 'calpha_l1' : r'$\delta \phi_{L1}$', 448 'polar_eccentricity':r'$\epsilon_{polar}$', 449 'polar_angle':r'$\alpha_{polar}$', 450 'alpha':r'$\alpha_{polar}$', 451 'dchi0':r'$d\chi_0$', 452 'dchi1':r'$d\chi_1$', 453 'dchi2':r'$d\chi_2$', 454 'dchi3':r'$d\chi_3$', 455 'dchi4':r'$d\chi_4$', 456 'dchi5':r'$d\chi_5$', 457 'dchi5l':r'$d\chi_{5}^{(l)}$', 458 'dchi6':r'$d\chi_6$', 459 'dchi6l':r'$d\chi_{6}^{(l)}$', 460 'dchi7':r'$d\chi_7$', 461 'dxi1':r'$d\xi_1$', 462 'dxi2':r'$d\xi_2$', 463 'dxi3':r'$d\xi_3$', 464 'dxi4':r'$d\xi_4$', 465 'dxi5':r'$d\xi_5$', 466 'dxi6':r'$d\xi_6$', 467 'dalpha1':r'$d\alpha_1$', 468 'dalpha2':r'$d\alpha_2$', 469 
'dalpha3':r'$d\alpha_3$', 470 'dalpha4':r'$d\alpha_4$', 471 'dalpha5':r'$d\alpha_5$', 472 'dbeta1':r'$d\beta_1$', 473 'dbeta2':r'$d\beta_2$', 474 'dbeta3':r'$d\beta_3$', 475 'dsigma1':r'$d\sigma_1$', 476 'dsigma2':r'$d\sigma_2$', 477 'dsigma3':r'$d\sigma_3$', 478 'dsigma4':r'$d\sigma_4$', 479 } 480 481 # Handle cases where multiple names have been used 482 if param in m1_names: 483 param = 'm1' 484 elif param in m2_names: 485 param = 'm2' 486 elif param in mc_names: 487 param = 'mc' 488 elif param in eta_names: 489 param = 'eta' 490 elif param in q_names: 491 param = 'q' 492 elif param in iota_names: 493 param = 'iota' 494 elif param in dist_names: 495 param = 'dist' 496 elif param in ra_names: 497 param = 'ra' 498 elif param in dec_names: 499 param = 'dec' 500 elif param in phase_names: 501 param = 'phase' 502 503 try: 504 label = labels[param] 505 except KeyError: 506 # Use simple string if no formated label is available for param 507 label = param 508 509 return label
510
511 #=============================================================================== 512 # Class definitions 513 #=============================================================================== 514 515 -class PosteriorOneDPDF(object):
516 """ 517 A data structure representing one parameter in a chain of posterior samples. 518 The Posterior class generates instances of this class for pivoting onto a given 519 parameter (the Posterior class is per-Sampler oriented whereas this class represents 520 the same one parameter in successive samples in the chain). 521 """
522 - def __init__(self,name,posterior_samples,injected_value=None,injFref=None,trigger_values=None,prior=None):
523 """ 524 Create an instance of PosteriorOneDPDF based on a table of posterior_samples. 525 526 @param name: A literal string name for the parameter. 527 @param posterior_samples: A 1D array of the samples. 528 @keyword injected_value: The injected or real value of the parameter. 529 @type injected_value: glue.ligolw.lsctables.SimInspiral 530 @keyword trigger_values: The trigger values of the parameter (dictionary w/ IFOs as keys). 531 @keyword prior: The prior value corresponding to each sample. 532 """ 533 self.__name=name 534 self.__posterior_samples=np.array(posterior_samples) 535 536 self.__injFref=injFref 537 self.__injval=injected_value 538 self.__trigvals=trigger_values 539 self.__prior=prior 540 541 return
542
543 - def __len__(self):
544 """ 545 Container method. Defined as number of samples. 546 """ 547 return len(self.__posterior_samples)
548
549 - def __getitem__(self,idx):
550 """ 551 Container method . Returns posterior containing sample idx (allows slicing). 552 """ 553 return PosteriorOneDPDF(self.__name, self.__posterior_samples[idx], injected_value=self.__injval, f_ref=self.__f_ref, trigger_values=self.__trigvals)
554 555 @property
556 - def name(self):
557 """ 558 Return the string literal name of the parameter. 559 560 @rtype: string 561 """ 562 return self.__name
563 564 @property
565 - def mean(self):
566 """ 567 Return the arithmetic mean for the marginal PDF on the parameter. 568 569 @rtype: number 570 """ 571 return np.mean(self.__posterior_samples)
572 573 @property
574 - def median(self):
575 """ 576 Return the median value for the marginal PDF on the parameter. 577 578 @rtype: number 579 """ 580 return np.median(self.__posterior_samples)
581 582 @property
583 - def stdev(self):
584 """ 585 Return the standard deviation of the marginal PDF on the parameter. 586 587 @rtype: number 588 """ 589 try: 590 stdev = sqrt(np.var(self.__posterior_samples)) 591 if not np.isfinite(stdev): 592 raise OverflowError 593 except OverflowError: 594 mean = np.mean(self.__posterior_samples) 595 stdev = mean * sqrt(np.var(self.__posterior_samples/mean)) 596 return stdev
597 598 @property
599 - def stacc(self):
600 """ 601 Return the 'standard accuracy statistic' (stacc) of the marginal 602 posterior of the parameter. 603 604 stacc is a standard deviant incorporating information about the 605 accuracy of the waveform recovery. Defined as the mean of the sum 606 of the squared differences between the points in the PDF 607 (x_i - sampled according to the posterior) and the true value 608 (x_{\rm true}). So for a marginalized one-dimensional PDF: 609 stacc = \sqrt{\frac{1}{N}\sum_{i=1}^N (x_i-x_{\rm true})2} 610 611 @rtype: number 612 """ 613 if self.__injval is None: 614 return None 615 else: 616 return np.sqrt(np.mean((self.__posterior_samples - self.__injval)**2.0))
617 618 @property
619 - def injval(self):
620 """ 621 Return the injected value set at construction . If no value was set 622 will return None . 623 624 @rtype: undefined 625 """ 626 return self.__injval
627 628 @property
629 - def trigvals(self):
630 """ 631 Return the trigger values set at construction. If no value was set 632 will return None . 633 634 @rtype: undefined 635 """ 636 return self.__trigvals
637 638 #@injval.setter #Python 2.6+
639 - def set_injval(self,new_injval):
640 """ 641 Set the injected/real value of the parameter. 642 643 @param new_injval: The injected/real value to set. 644 @type new_injval: glue.ligolw.lsctables.SimInspiral 645 """ 646 647 self.__injval=new_injval
648
649 - def set_trigvals(self,new_trigvals):
650 """ 651 Set the trigger values of the parameter. 652 653 @param new_trigvals: Dictionary containing trigger values with IFO keys. 654 @type new_trigvals: glue.ligolw.lsctables.SnglInspiral 655 """ 656 657 self.__trigvals=new_trigvals
658 659 @property
660 - def samples(self):
661 """ 662 Return a 1D numpy.array of the samples. 663 664 @rtype: numpy.array 665 """ 666 return self.__posterior_samples
667
668 - def delete_samples_by_idx(self,samples):
669 """ 670 Remove samples from posterior, analagous to numpy.delete but opperates in place. 671 672 @param samples: A list containing the indexes of the samples to be removed. 673 @type samples: list 674 """ 675 self.__posterior_samples=np.delete(self.__posterior_samples,samples).reshape(-1,1)
676 677 @property
678 - def gaussian_kde(self):
679 """ 680 Return a SciPy gaussian_kde (representing a Gaussian KDE) of the samples. 681 682 @rtype: scipy.stats.kde.gaussian_kde 683 """ 684 from numpy import seterr as np_seterr 685 from scipy import seterr as sp_seterr 686 687 np_seterr(under='ignore') 688 sp_seterr(under='ignore') 689 try: 690 return_value=stats.kde.gaussian_kde(np.transpose(self.__posterior_samples)) 691 except: 692 exfile=open('exception.out','w') 693 np.savetxt(exfile,self.__posterior_samples) 694 exfile.close() 695 raise 696 697 return return_value
698 699 @property
700 - def KL(self):
701 """Returns the KL divergence between the prior and the posterior. 702 It measures the relative information content in nats. The prior is evaluated 703 at run time. It defaults to None. If None is passed, it just returns the information content 704 of the posterior." 705 """ 706 707 def uniform(x): 708 return np.array([1./(np.max(x)-np.min(x)) for _ in x])
709 710 posterior, dx = np.histogram(self.samples,bins=36,normed=True) 711 from scipy.stats import entropy 712 # check the kind of prior and process the string accordingly 713 prior = get_prior(self.name) 714 if prior is None: 715 raise ValueError 716 elif prior=='uniform': 717 prior+='(self.samples)' 718 elif 'x' in prior: 719 prior.replace('x','self.samples') 720 elif not(prior.startswith('np.')): 721 prior = 'np.'+prior 722 prior+='(self.samples)' 723 else: 724 raise ValueError 725 726 try: 727 prior_dist = eval(prior) 728 except: 729 raise ValueError 730 731 return entropy(posterior, qk=prior_dist)
732
733 - def prob_interval(self,intervals):
734 """ 735 Evaluate probability intervals. 736 737 @param intervals: A list of the probability intervals [0-1] 738 @type intervals: list 739 """ 740 list_of_ci=[] 741 samples_temp=np.sort(np.squeeze(self.samples)) 742 743 for interval in intervals: 744 if interval<1.0: 745 samples_temp 746 N=np.size(samples_temp) 747 #Find index of lower bound 748 lower_idx=int(floor((N/2.0)*(1-interval))) 749 if lower_idx<0: 750 lower_idx=0 751 #Find index of upper bound 752 upper_idx=N-int(floor((N/2.0)*(1-interval))) 753 if upper_idx>N: 754 upper_idx=N-1 755 756 list_of_ci.append((float(samples_temp[lower_idx]),float(samples_temp[upper_idx]))) 757 else: 758 list_of_ci.append((None,None)) 759 760 return list_of_ci
761
762 -class Posterior(object):
763 """ 764 Data structure for a table of posterior samples . 765 """
766 - def __init__(self,commonResultsFormatData,SimInspiralTableEntry=None,inj_spin_frame='OrbitalL', injFref=100,SnglInpiralList=None,name=None,description=None,votfile=None):
767 """ 768 Constructor. 769 770 @param commonResultsFormatData: A 2D array containing the posterior 771 samples and related data. The samples chains form the columns. 772 @type commonResultsFormatData: custom type 773 @param SimInspiralTableEntry: A SimInspiralTable row containing the injected values. 774 @type SimInspiralTableEntry: glue.ligolw.lsctables.SimInspiral 775 @param SnglInspiralList: A list of SnglInspiral objects containing the triggers. 776 @type SnglInspiralList: list 777 778 """ 779 common_output_table_header,common_output_table_raw =commonResultsFormatData 780 self._posterior={} 781 self._injFref=injFref 782 self._injection=SimInspiralTableEntry 783 784 self._triggers=SnglInpiralList 785 self._loglaliases=['deltalogl', 'posterior', 'logl','logL','likelihood'] 786 self._logpaliases=['logp', 'logP','prior','logprior','Prior','logPrior'] 787 self._votfile=votfile 788 789 common_output_table_header=[i.lower() for i in common_output_table_header] 790 791 # Define XML mapping 792 self._injXMLFuncMap={ 793 'mchirp':lambda inj:inj.mchirp, 794 'chirpmass':lambda inj:inj.mchirp, 795 'mc':lambda inj:inj.mchirp, 796 'mass1':lambda inj:inj.mass1, 797 'm1':lambda inj:inj.mass1, 798 'mass2':lambda inj:inj.mass2, 799 'm2':lambda inj:inj.mass2, 800 'mtotal':lambda inj:float(inj.mass1)+float(inj.mass2), 801 'eta':lambda inj:inj.eta, 802 'q':self._inj_q, 803 'asym_massratio':self._inj_q, 804 'massratio':lambda inj:inj.eta, 805 'sym_massratio':lambda inj:inj.eta, 806 'time': lambda inj:float(inj.get_end()), 807 'time_mean': lambda inj:float(inj.get_end()), 808 'end_time': lambda inj:float(inj.get_end()), 809 'phi0':lambda inj:inj.phi0, 810 'phi_orb': lambda inj: inj.coa_phase, 811 'dist':lambda inj:inj.distance, 812 'distance':lambda inj:inj.distance, 813 'ra':self._inj_longitude, 814 'rightascension':self._inj_longitude, 815 'long':self._inj_longitude, 816 'longitude':self._inj_longitude, 817 'dec':lambda inj:inj.latitude, 818 'declination':lambda inj:inj.latitude, 
819 'lat':lambda inj:inj.latitude, 820 'latitude':lambda inj:inj.latitude, 821 'psi': lambda inj: np.mod(inj.polarization, np.pi), 822 'f_ref': lambda inj: self._injFref, 823 'polarisation':lambda inj:inj.polarization, 824 'polarization':lambda inj:inj.polarization, 825 'h1_end_time':lambda inj:float(inj.get_end('H')), 826 'l1_end_time':lambda inj:float(inj.get_end('L')), 827 'v1_end_time':lambda inj:float(inj.get_end('V')), 828 'lal_amporder':lambda inj:inj.amp_order} 829 830 # Add on all spin parameterizations 831 for key, val in self._inj_spins(self._injection, frame=inj_spin_frame).items(): 832 self._injXMLFuncMap[key] = val 833 834 for one_d_posterior_samples,param_name in zip(np.hsplit(common_output_table_raw,common_output_table_raw.shape[1]),common_output_table_header): 835 836 self._posterior[param_name]=PosteriorOneDPDF(param_name.lower(),one_d_posterior_samples,injected_value=self._getinjpar(param_name),injFref=self._injFref,trigger_values=self._gettrigpar(param_name)) 837 838 if 'mchirp' in common_output_table_header and 'eta' in common_output_table_header \ 839 and (not 'm1' in common_output_table_header) and (not 'm2' in common_output_table_header): 840 try: 841 print 'Inferring m1 and m2 from mchirp and eta' 842 (m1,m2)=mc2ms(self._posterior['mchirp'].samples, self._posterior['eta'].samples) 843 self._posterior['m1']=PosteriorOneDPDF('m1',m1,injected_value=self._getinjpar('m1'),trigger_values=self._gettrigpar('m1')) 844 self._posterior['m2']=PosteriorOneDPDF('m2',m2,injected_value=self._getinjpar('m2'),trigger_values=self._gettrigpar('m2')) 845 except KeyError: 846 print 'Unable to deduce m1 and m2 from input columns' 847 848 849 logLFound=False 850 851 for loglalias in self._loglaliases: 852 853 if loglalias in common_output_table_header: 854 try: 855 self._logL=self._posterior[loglalias].samples 856 except KeyError: 857 print "No '%s' column in input table!"%loglalias 858 continue 859 logLFound=True 860 861 if not logLFound: 862 raise 
RuntimeError("No likelihood/posterior values found!") 863 self._logP=None 864 865 for logpalias in self._logpaliases: 866 if logpalias in common_output_table_header: 867 try: 868 self._logP=self._posterior[logpalias].samples 869 except KeyError: 870 print "No '%s' column in input table!"%logpalias 871 continue 872 if not 'log' in logpalias: 873 self._logP=[np.log(i) for i in self._logP] 874 875 if name is not None: 876 self.__name=name 877 878 if description is not None: 879 self.__description=description 880 881 return
882
def extend_posterior(self):
    """
    Add derived parameters to the Posterior object (component masses,
    spin angles, time delays, redshift, source-frame masses, final
    mass/spin of the merged system, etc.), computed from whichever
    sampler parameterisation is present in self.names.
    """
    injection=self._injection
    pos=self
    # Resolve the naming convention this sampler used for chirp mass,
    # asymmetric mass ratio and symmetric mass ratio.
    if 'mc' in pos.names:
        mchirp_name = 'mc'
    elif 'chirpmass' in pos.names:
        mchirp_name = 'chirpmass'
    else:
        mchirp_name = 'mchirp'

    if 'asym_massratio' in pos.names:
        q_name = 'asym_massratio'
    else:
        q_name = 'q'

    if 'sym_massratio' in pos.names:
        eta_name= 'sym_massratio'
    elif 'massratio' in pos.names:
        eta_name= 'massratio'
    else:
        eta_name='eta'

    # Generate component mass posterior samples (if they didn't exist already):
    # either copy (mass1,mass2) to (m1,m2) or derive them from (mc,eta)/(mc,q).
    if 'mass1' in pos.names and 'mass2' in pos.names :
        pos.append_mapping(('m1','m2'),lambda x,y:(x,y) ,('mass1','mass2'))

    if (mchirp_name in pos.names and eta_name in pos.names) and \
    ('mass1' not in pos.names or 'm1' not in pos.names) and \
    ('mass2' not in pos.names or 'm2' not in pos.names):

        pos.append_mapping(('m1','m2'),mc2ms,(mchirp_name,eta_name))

    if (mchirp_name in pos.names and q_name in pos.names) and \
    ('mass1' not in pos.names or 'm1' not in pos.names) and \
    ('mass2' not in pos.names or 'm2' not in pos.names):

        pos.append_mapping(('m1','m2'),q2ms,(mchirp_name,q_name))
        pos.append_mapping('eta',q2eta,(mchirp_name,q_name))

    if ('m1' in pos.names and 'm2' in pos.names and not 'mtotal' in pos.names ):
        pos.append_mapping('mtotal', lambda m1,m2: m1+m2, ('m1','m2') )

    # Copy the long spin-parameter names over to their short aliases.
    if('a_spin1' in pos.names): pos.append_mapping('a1',lambda a:a,'a_spin1')
    if('a_spin2' in pos.names): pos.append_mapping('a2',lambda a:a,'a_spin2')
    if('phi_spin1' in pos.names): pos.append_mapping('phi1',lambda a:a,'phi_spin1')
    if('phi_spin2' in pos.names): pos.append_mapping('phi2',lambda a:a,'phi_spin2')
    if('theta_spin1' in pos.names): pos.append_mapping('theta1',lambda a:a,'theta_spin1')
    if('theta_spin2' in pos.names): pos.append_mapping('theta2',lambda a:a,'theta_spin2')

    # Relative phase of the complex SNR between each detector pair,
    # wrapped into [-pi, pi).
    my_ifos=['h1','l1','v1']
    for ifo1,ifo2 in combinations(my_ifos,2):
        p1=ifo1+'_cplx_snr_arg'
        p2=ifo2+'_cplx_snr_arg'
        if p1 in pos.names and p2 in pos.names:
            delta=np.mod(pos[p1].samples - pos[p2].samples + np.pi ,2.0*np.pi)-np.pi
            pos.append(PosteriorOneDPDF(ifo1+ifo2+'_relative_phase',delta))

    # Ensure that both theta_jn and inclination are output for runs
    # with zero tilt (for runs with tilt, this will be taken care of
    # below when the old spin angles are computed as functions of the
    # new ones
    # Disabled this since the parameters are degenerate and causing problems
    #if ('theta_jn' in pos.names) and (not 'tilt1' in pos.names) and (not 'tilt2' in pos.names):
    #    pos.append_mapping('iota', lambda t:t, 'theta_jn')

    # Compute time delays from sky position
    try:
        if ('ra' in pos.names or 'rightascension' in pos.names) \
        and ('declination' in pos.names or 'dec' in pos.names) \
        and 'time' in pos.names:
            from pylal import xlal,inject
            from pylal import date
            from pylal.date import XLALTimeDelayFromEarthCenter
            from pylal.xlal.datatypes.ligotimegps import LIGOTimeGPS
            import itertools
            from numpy import array
            # Map IFO prefixes to the cached_detector keys used by pylal.inject.
            detMap = {'H1': 'LHO_4k', 'H2': 'LHO_2k', 'L1': 'LLO_4k',
                      'G1': 'GEO_600', 'V1': 'VIRGO', 'T1': 'TAMA_300'}
            if 'ra' in pos.names:
                ra_name='ra'
            else: ra_name='rightascension'
            if 'dec' in pos.names:
                dec_name='dec'
            else: dec_name='declination'
            ifo_times={}
            my_ifos=['H1','L1','V1']
            for ifo in my_ifos:
                inj_time=None
                if injection:
                    inj_time=float(injection.get_end(ifo[0]))
                location=inject.cached_detector[detMap[ifo]].location
                # Per-sample arrival time at this detector: geocentre time plus
                # the light-travel delay for the sampled sky position.
                # (Python-2 map(); returns a list there.)
                ifo_times[ifo]=array(map(lambda ra,dec,time: array([time[0]+XLALTimeDelayFromEarthCenter(location,ra[0],dec[0],LIGOTimeGPS(float(time[0])))]), pos[ra_name].samples,pos[dec_name].samples,pos['time'].samples))
                loc_end_time=PosteriorOneDPDF(ifo.lower()+'_end_time',ifo_times[ifo],injected_value=inj_time)
                pos.append(loc_end_time)
            # Pairwise inter-detector delays (both orderings are appended).
            for ifo1 in my_ifos:
                for ifo2 in my_ifos:
                    if ifo1==ifo2: continue
                    delay_time=ifo_times[ifo2]-ifo_times[ifo1]
                    if injection:
                        inj_delay=float(injection.get_end(ifo2[0])-injection.get_end(ifo1[0]))
                    else:
                        inj_delay=None
                    time_delay=PosteriorOneDPDF(ifo1.lower()+ifo2.lower()+'_delay',delay_time,inj_delay)
                    pos.append(time_delay)
    except ImportError:
        print 'Warning: Could not import lal python bindings, check you ./configured with --enable-swig-python'
        print 'This means I cannot calculate time delays'

    #Calculate new spin angles
    new_spin_params = ['tilt1','tilt2','theta_jn','beta']
    if not set(new_spin_params).issubset(set(pos.names)):
        old_params = ['f_ref',mchirp_name,'eta','iota','a1','theta1','phi1']
        if 'a2' in pos.names: old_params += ['a2','theta2','phi2']
        try:
            pos.append_mapping(new_spin_params, spin_angles, old_params)
        except KeyError:
            print "Warning: Cannot find spin parameters. Skipping spin angle calculations."

    #Calculate effective precessing spin magnitude
    # NOTE(review): the argument order here ('a1','tilt1','m1',...) differs from
    # the second chi_p mapping further below ('m1','a1','tilt1',...); only one
    # ordering can match chi_precessing's signature -- confirm which is correct.
    if ('a1' in pos.names and 'tilt1' in pos.names and 'm1' in pos.names ) and ('a2' in pos.names and 'tilt2' in pos.names and 'm2' in pos.names):
        pos.append_mapping('chi_p', chi_precessing, ['a1', 'tilt1', 'm1', 'a2', 'tilt2', 'm2'])

    # Calculate redshift from luminosity distance measurements
    if('distance' in pos.names):
        pos.append_mapping('redshift', calculate_redshift, 'distance')

    # Calculate source mass parameters
    if ('m1' in pos.names) and ('redshift' in pos.names):
        pos.append_mapping('m1_source', source_mass, ['m1', 'redshift'])

    if ('m2' in pos.names) and ('redshift' in pos.names):
        pos.append_mapping('m2_source', source_mass, ['m2', 'redshift'])

    if ('mtotal' in pos.names) and ('redshift' in pos.names):
        pos.append_mapping('mtotal_source', source_mass, ['mtotal', 'redshift'])

    if ('mc' in pos.names) and ('redshift' in pos.names):
        pos.append_mapping('mc_source', source_mass, ['mc', 'redshift'])

    #Store signed spin magnitudes in separate parameters and make a1,a2 magnitudes
    if 'a1' in pos.names:
        if 'tilt1' in pos.names:
            # Precessing run: project the magnitude onto L.
            pos.append_mapping('a1z', lambda a, tilt: a*np.cos(tilt), ('a1','tilt1'))
        else:
            # Aligned run: a1 is signed; keep the signed value as a1z and
            # replace a1 by its magnitude.
            pos.append_mapping('a1z', lambda x: x, 'a1')
            inj_az = None
            if injection is not None:
                inj_az = injection.spin1z
            pos['a1z'].set_injval(inj_az)
            pos.pop('a1')
            pos.append_mapping('a1', lambda x: np.abs(x), 'a1z')

    if 'a2' in pos.names:
        if 'tilt2' in pos.names:
            pos.append_mapping('a2z', lambda a, tilt: a*np.cos(tilt), ('a2','tilt2'))
        else:
            pos.append_mapping('a2z', lambda x: x, 'a2')
            inj_az = None
            if injection is not None:
                inj_az = injection.spin2z
            pos['a2z'].set_injval(inj_az)
            pos.pop('a2')
            pos.append_mapping('a2', lambda x: np.abs(x), 'a2z')

    #Calculate effective spin parallel to L
    if ('m1' in pos.names and 'a1z' in pos.names) and ('m2' in pos.names and 'a2z' in pos.names):
        pos.append_mapping('chi_eff', lambda m1,a1z,m2,a2z: (m1*a1z + m2*a2z) / (m1 + m2), ('m1','a1z','m2','a2z'))

    #If precessing spins calculate total effective spin
    if ('m1' in pos.names and 'a1' in pos.names and 'tilt1' in pos.names) and ('m2' in pos.names and 'a2' in pos.names and 'tilt2' in pos.names):
        pos.append_mapping('chi_tot', lambda m1,a1,m2,a2: (m1*a1 + m2*a2) / (m1 + m2), ('m1','a1','m2','a2'))

    #Calculate effective precessing spin magnitude
    # NOTE(review): second chi_p mapping, mass-first argument order (cf. above).
    if ('m1' in pos.names and 'a1' in pos.names and 'tilt1' in pos.names) and ('m2' in pos.names and 'a2' in pos.names and 'tilt2' in pos.names):
        pos.append_mapping('chi_p', chi_precessing, ['m1', 'a1', 'tilt1', 'm2', 'a2', 'tilt2'])

    # Calculate redshift from luminosity distance measurements
    # NOTE(review): this repeats the redshift/source-mass section above
    # verbatim; presumably harmless re-appending -- confirm before removing.
    if('distance' in pos.names):
        pos.append_mapping('redshift', calculate_redshift, 'distance')

    # Calculate source mass parameters
    if ('m1' in pos.names) and ('redshift' in pos.names):
        pos.append_mapping('m1_source', source_mass, ['m1', 'redshift'])

    if ('m2' in pos.names) and ('redshift' in pos.names):
        pos.append_mapping('m2_source', source_mass, ['m2', 'redshift'])

    if ('mtotal' in pos.names) and ('redshift' in pos.names):
        pos.append_mapping('mtotal_source', source_mass, ['mtotal', 'redshift'])

    if ('mc' in pos.names) and ('redshift' in pos.names):
        pos.append_mapping('mc_source', source_mass, ['mc', 'redshift'])

    #Calculate new tidal parameters
    new_tidal_params = ['lam_tilde','dlam_tilde']
    old_tidal_params = ['lambda1','lambda2','eta']
    if 'lambda1' in pos.names or 'lambda2' in pos.names:
        try:
            pos.append_mapping(new_tidal_params, symm_tidal_params, old_tidal_params)
        except KeyError:
            print "Warning: Cannot find tidal parameters. Skipping tidal calculations."

    #If new spin params present, calculate old ones
    old_spin_params = ['iota', 'theta1', 'phi1', 'theta2', 'phi2', 'beta']
    new_spin_params = ['theta_jn', 'phi_jl', 'tilt1', 'tilt2', 'phi12', 'a1', 'a2', 'm1', 'm2', 'f_ref']
    try:
        # If f_ref was sampled at 0, fall back to the run's lower frequency
        # cutoff as the reference frequency.
        if pos['f_ref'].samples[0][0]==0.0:
            for name in ['flow','f_lower']:
                if name in pos.names:
                    new_spin_params = ['theta_jn', 'phi_jl', 'tilt1', 'tilt2', 'phi12', 'a1', 'a2', 'm1', 'm2', name]
    except:
        # NOTE(review): bare except -- any failure (not just a missing f_ref
        # column) lands here.
        print "No f_ref for SimInspiralTransformPrecessingNewInitialConditions()."
    if set(new_spin_params).issubset(set(pos.names)) and not set(old_spin_params).issubset(set(pos.names)):
        pos.append_mapping(old_spin_params, physical2radiationFrame, new_spin_params)

    #Calculate spin magnitudes for aligned runs
    if 'spin1' in pos.names:
        inj_a1 = inj_a2 = None
        if injection:
            inj_a1 = sqrt(injection.spin1x*injection.spin1x + injection.spin1y*injection.spin1y + injection.spin1z*injection.spin1z)
            inj_a2 = sqrt(injection.spin2x*injection.spin2x + injection.spin2y*injection.spin2y + injection.spin2z*injection.spin2z)

        try:
            a1_samps = abs(pos['spin1'].samples)
            a1_pos = PosteriorOneDPDF('a1',a1_samps,injected_value=inj_a1)
            pos.append(a1_pos)
        except KeyError:
            print "Warning: problem accessing spin1 values."

        try:
            a2_samps = abs(pos['spin2'].samples)
            a2_pos = PosteriorOneDPDF('a2',a2_samps,injected_value=inj_a2)
            pos.append(a2_pos)
        except KeyError:
            print "Warning: no spin2 values found."

    # Calculate mass and spin of final merged system
    if ('m1' in pos.names) and ('m2' in pos.names):
        if ('tilt1' in pos.names) or ('tilt2' in pos.names):
            print "A precessing fit formula is not available yet. Using a non-precessing fit formula on the aligned-spin components."
        if ('a1z' in pos.names) and ('a2z' in pos.names):
            print "Using non-precessing fit formula [Healy at al (2014)] for final mass and spin (on masses and projected spin components)."
            try:
                pos.append_mapping('af', bbh_final_spin_non_precessing_Healyetal, ['m1', 'm2', 'a1z', 'a2z'])
                pos.append_mapping('mf', lambda m1, m2, chi1z, chi2z, chif: bbh_final_mass_non_precessing_Healyetal(m1, m2, chi1z, chi2z, chif=chif), ['m1', 'm2', 'a1z', 'a2z', 'af'])
            except Exception,e:
                print "Could not calculate final parameters. The error was: %s"%(str(e))
        elif ('a1' in pos.names) and ('a2' in pos.names):
            if ('tilt1' in pos.names) and ('tilt2' in pos.names):
                print "Projecting spin and using non-precessing fit formula [Healy at al (2014)] for final mass and spin."
                try:
                    pos.append_mapping('af', bbh_final_spin_projected_spin_Healyetal, ['m1', 'm2', 'a1', 'a2', 'tilt1', 'tilt2'])
                    pos.append_mapping('mf', bbh_final_mass_projected_spin_Healyetal, ['m1', 'm2', 'a1', 'a2', 'tilt1', 'tilt2', 'af'])
                except Exception,e:
                    print "Could not calculate final parameters. The error was: %s"%(str(e))
            else:
                print "Using non-precessing fit formula [Healy at al (2014)] for final mass and spin (on masses and spin magnitudes)."
                try:
                    pos.append_mapping('af', bbh_final_spin_non_precessing_Healyetal, ['m1', 'm2', 'a1', 'a2'])
                    pos.append_mapping('mf', lambda m1, m2, chi1, chi2, chif: bbh_final_mass_non_precessing_Healyetal(m1, m2, chi1, chi2, chif=chif), ['m1', 'm2', 'a1', 'a2', 'af'])
                except Exception,e:
                    print "Could not calculate final parameters. The error was: %s"%(str(e))
        else:
            print "Using non-spinning fit formula [Pan at al (2010)] for final mass and spin."
            try:
                pos.append_mapping('af', bbh_final_spin_non_spinning_Panetal, ['m1', 'm2'])
                pos.append_mapping('mf', bbh_final_mass_non_spinning_Panetal, ['m1', 'm2'])
            except Exception,e:
                print "Could not calculate final parameters. The error was: %s"%(str(e))
        if ('mf' in pos.names) and ('redshift' in pos.names):
            try:
                pos.append_mapping('mf_source', source_mass, ['mf', 'redshift'])
            except Exception,e:
                print "Could not calculate final source frame mass. The error was: %s"%(str(e))

    # Calculate radiated energy and peak luminosity
    if ('mtotal_source' in pos.names) and ('mf_source' in pos.names):
        try:
            pos.append_mapping('e_rad', lambda mtot_s, mf_s: mtot_s-mf_s, ['mtotal_source', 'mf_source'])
        except Exception,e:
            print "Could not calculate radiated energy. The error was: %s"%(str(e))

    if ('q' in pos.names) and ('a1z' in pos.names) and ('a2z' in pos.names):
        try:
            pos.append_mapping('l_peak', bbh_aligned_Lpeak_6mode_SHXJDK, ['q', 'a1z', 'a2z'])
        except Exception,e:
            print "Could not calculate peak luminosity. The error was: %s"%(str(e))
1174
def bootstrap(self):
    """
    Returns a new Posterior object that contains a bootstrap
    sample of self.

    @rtype: Posterior
    """
    names=[]
    samples=[]
    for name,oneDpos in self._posterior.items():
        names.append(name)
        samples.append(oneDpos.samples)

    samplesBlock=np.hstack(samples)

    Nsamp=samplesBlock.shape[0]
    rows=np.vsplit(samplesBlock,Nsamp)

    # BUGFIX: samplesBlock[:,:] is a numpy *view*, so writing resampled rows
    # into it also overwrote samplesBlock -- and 'rows' are views of that same
    # memory, so later draws could pick already-overwritten rows.  Resample
    # into an independent copy instead.
    bootstrapSamples=samplesBlock.copy()

    for i in range(Nsamp):
        bootstrapSamples[i,:]=random.choice(rows)

    return Posterior((names,bootstrapSamples),self._injection,self._triggers)
1199
def delete_samples_by_idx(self,samples):
    """
    Remove the samples at the given indices from every stored 1-D
    marginal posterior.

    @param samples: The indexes of the samples to be removed.
    @type samples: list
    """
    for _, one_d_pos in self:
        one_d_pos.delete_samples_by_idx(samples)
    return
1210
def delete_NaN_entries(self,param_list):
    """
    Remove every sample that contains a NaN in any of the requested params.

    @param param_list: The parameters to be checked for NaNs.
    @type param_list: list
    """
    nan_idxs = np.array((), dtype=int)
    nan_dict = {}
    for param in param_list:
        nan_bool_array = np.isnan(self[param].samples).any(1)
        idxs = np.where(nan_bool_array)[0]
        if len(idxs) > 0:
            nan_dict[param]=len(idxs)
            nan_idxs = np.append(nan_idxs, idxs)
    # BUGFIX: a sample with NaNs in several parameters appeared once per
    # parameter; deduplicate so the reported count matches the number of
    # samples actually removed.
    nan_idxs = np.unique(nan_idxs)
    total_samps = len(self)
    nan_samps = len(nan_idxs)
    # BUGFIX: 'is not 0' tested object identity, not value; use != instead.
    if nan_samps != 0:
        print("WARNING: removing %i of %i total samples due to NaNs:"% (nan_samps,total_samps))
        for param in nan_dict.keys():
            print("\t%i NaNs in %s."%(nan_dict[param],param))
        self.delete_samples_by_idx(nan_idxs)
    return
@property
def DIC(self):
    """Deviance Information Criterion estimated from the posterior
    samples, defined as -2*(<log(L)> - Var(log(L))); smaller values
    are "better."

    """
    mean_logl = np.mean(self._logL)
    var_logl = np.var(self._logL)
    return -2.0 * (mean_logl - var_logl)
@property
def injection(self):
    """
    The injected parameter values attached to this posterior, if any.

    @rtype: glue.ligolw.lsctables.SimInspiral
    """
    return self._injection
@property
def triggers(self):
    """
    The list of trigger values attached to this posterior, if any.

    @rtype: list
    """
    return self._triggers
1264
def _total_incl_restarts(self, samples):
    # Sum of cycle counts across chain restarts: each time the running
    # cycle counter decreases, the previous value was the end of a
    # completed segment, so accumulate it; finish with the last value.
    running_total = 0
    prev = samples[0]
    for current in samples[1:]:
        if current < prev:
            running_total += prev
        prev = current
    return running_total + samples[-1]
1274
def longest_chain_cycles(self):
    """
    Returns the number of cycles in the longest chain

    @rtype: number
    """
    samps, header = self.samples()
    cols = header.split()
    if 'cycle' not in cols:
        raise RuntimeError("Cannot compute number of cycles in longest chain")

    cycle_col = cols.index('cycle')
    if 'chain' not in cols:
        # Single chain: just total the cycle column (including restarts).
        return int(self._total_incl_restarts(samps[:, cycle_col]))

    chain_col = cols.index('chain')
    max_cycle = 0
    for chain_id in np.unique(samps[:, chain_col]):
        chain_cycles = samps[samps[:, chain_col] == chain_id, cycle_col]
        max_cycle = max(max_cycle, self._total_incl_restarts(chain_cycles))
    return int(max_cycle)
1297 1298 #@injection.setter #Python 2.6+
def set_injection(self,injection):
    """
    Attach a set of injected parameter values and propagate them to the
    stored 1-D posteriors.

    @param injection: A SimInspiralTable row object containing the injected parameters.
    @type injection: glue.ligolw.lsctables.SimInspiral
    """
    if injection is None:
        return
    self._injection = injection
    for name, _ in self:
        injval = self._getinjpar(name)
        if injval is not None:
            self[name].set_injval(injval)
1312
def set_triggers(self,triggers):
    """
    Attach a list of triggers and propagate their values to the stored
    1-D posteriors.

    @param triggers: A list of SnglInspiral objects.
    @type triggers: list
    """
    if triggers is None:
        return
    self._triggers = triggers
    for name, _ in self:
        trigvals = self._gettrigpar(name)
        if trigvals is not None:
            self[name].set_trigvals(trigvals)
1326 1327
def _getinjpar(self,paramname):
    """
    Map a parameter name onto the corresponding entry of a SimInspiralTable.
    Returns None when no injection is set or the name is unknown.
    """
    if self._injection is None:
        return None
    target = paramname.lower().strip()
    for key in self._injXMLFuncMap:
        if target == key.lower().strip():
            try:
                # Most map entries are extractor callables...
                return self._injXMLFuncMap[key](self._injection)
            except TypeError:
                # ...but some are plain constants.
                return self._injXMLFuncMap[key]
    return None
1340
def _gettrigpar(self,paramname):
    """
    Map a parameter name onto per-IFO values extracted from the SnglInspiral
    triggers.  Returns a {ifo: value} dict, a constant map entry, or None.
    """
    if self._triggers is None:
        return None
    target = paramname.lower().strip()
    result = None
    for key in self._injXMLFuncMap:
        if target == key.lower().strip():
            try:
                result = dict((trig.ifo, self._injXMLFuncMap[key](trig))
                              for trig in self._triggers)
            except TypeError:
                # Map entry is a plain constant rather than a callable.
                return self._injXMLFuncMap[key]
            except AttributeError:
                # Triggers lack the requested field.
                return None
    return result
1356
def __getitem__(self,key):
    """
    Container method.  Look up a 1-D posterior by (case-insensitive) name.
    """
    lowered_key = key.lower()
    return self._posterior[lowered_key]
1362
def __len__(self):
    """
    Container method.  The length of a Posterior is its number of samples,
    taken from the log-likelihood chain.
    """
    return len(self._logL)
1368
def __iter__(self):
    """
    Container method.  Iterating a Posterior yields (name, one_d_pos)
    pairs, delegated to self.forward().
    """
    return self.forward()
1375
def forward(self):
    """
    Generate a forward iterator (in sense of list of names) over Posterior,
    yielding (name, one_d_pos) pairs.
    """
    # Snapshot the key order up front so the generator is robust against
    # re-reading the dict on every step.
    for param_name in list(self._posterior.keys()):
        yield param_name, self._posterior[param_name]
1387
def bySample(self):
    """
    Generate a forward iterator over the list of samples corresponding to
    the data stored within the Posterior instance. These are returned as
    ParameterSamples instances.
    """
    # BUGFIX: samples is a method (called with () everywhere else in this
    # class); the original 'pos_array,header=self.samples' unpacked the
    # bound method object itself, raising TypeError.
    pos_array, header = self.samples()
    current_item = 0
    while current_item < len(self):
        sample_array = np.squeeze(pos_array[current_item, :])
        yield PosteriorSample(sample_array, header, header)
        current_item += 1
@property
def dim(self):
    """
    Number of parameters stored in this posterior.
    """
    return len(self._posterior)
@property
def names(self):
    """
    List of parameter names, in iteration order.
    """
    return [param_name for param_name, _ in self]
@property
def means(self):
    """
    Dict mapping each parameter name to its posterior mean.
    """
    return dict((param_name, one_pos.mean) for param_name, one_pos in self)
@property
def medians(self):
    """
    Dict mapping each parameter name to its posterior median.
    """
    return dict((param_name, one_pos.median) for param_name, one_pos in self)
@property
def stdevs(self):
    """
    Dict mapping each parameter name to its posterior standard deviation.
    """
    return dict((param_name, one_pos.stdev) for param_name, one_pos in self)
@property
def name(self):
    """
    Qualified string containing the 'name' of the Posterior instance.
    """
    posterior_name = self.__name
    return posterior_name
@property
def description(self):
    """
    Qualified string containing a 'description' of the Posterior instance.
    """
    posterior_description = self.__description
    return posterior_description
1462
def append(self,one_d_posterior):
    """
    Container method.  Add (or replace) a 1-D marginal posterior, keyed
    by its name.
    """
    self._posterior[one_d_posterior.name] = one_d_posterior
    return
1469
def pop(self,param_name):
    """
    Container method.  Remove and return the PosteriorOneDPDF stored
    under param_name.
    """
    removed = self._posterior.pop(param_name)
    return removed
1475
def append_mapping(self, new_param_names, func, post_names):
    """
    Append posteriors pos1,pos2,...=func(post_names).

    @param new_param_names: a name (str) or sequence of names for the outputs.
    @param func: mapping applied to the input samples (and to the injection
        and trigger values when they are all available).
    @param post_names: a name (str) or sequence of names of the inputs.
    """
    # deepcopy 1D posteriors to ensure mapping function doesn't modify the originals
    import copy
    #1D input
    if isinstance(post_names, str):
        old_post = copy.deepcopy(self[post_names])
        old_inj = old_post.injval
        old_trigs = old_post.trigvals
        # BUGFIX: compare against None so a legitimate injected value of
        # 0.0 is still mapped rather than dropped.
        if old_inj is not None:
            new_inj = func(old_inj)
        else:
            new_inj = None
        if old_trigs:
            new_trigs = {}
            for IFO in old_trigs.keys():
                new_trigs[IFO] = func(old_trigs[IFO])
        else:
            new_trigs = None

        samps = func(old_post.samples)
        new_post = PosteriorOneDPDF(new_param_names, samps, injected_value=new_inj, trigger_values=new_trigs)
        # BUGFIX: '==' not 'is' (identity on ints is unreliable), and the
        # warning referenced an undefined name 'post' (NameError).
        if new_post.samples.ndim == 0:
            print("WARNING: No posterior calculated for %s ..." % new_post.name)
        else:
            self.append(new_post)
    #MultiD input
    else:
        old_posts = [copy.deepcopy(self[post_name]) for post_name in post_names]
        old_injs = [post.injval for post in old_posts]
        old_trigs = [post.trigvals for post in old_posts]
        samps = func(*[post.samples for post in old_posts])
        #1D output
        if isinstance(new_param_names, str):
            # Only map the injection/triggers when every input has one.
            if None not in old_injs:
                inj = func(*old_injs)
            else:
                inj = None
            if None not in old_trigs:
                new_trigs = {}
                for IFO in old_trigs[0].keys():
                    oldvals = [param[IFO] for param in old_trigs]
                    new_trigs[IFO] = func(*oldvals)
            else:
                new_trigs = None
            new_post = PosteriorOneDPDF(new_param_names, samps, injected_value=inj, trigger_values=new_trigs)
            self.append(new_post)
        #MultiD output
        else:
            if None not in old_injs:
                injs = func(*old_injs)
            else:
                injs = [None for name in new_param_names]
            if None not in old_trigs:
                new_trigs = [{} for param in range(len(new_param_names))]
                for IFO in old_trigs[0].keys():
                    oldvals = [param[IFO] for param in old_trigs]
                    newvals = func(*oldvals)
                    for param, newval in enumerate(newvals):
                        new_trigs[param][IFO] = newval
            else:
                new_trigs = [None for param in range(len(new_param_names))]
            # BUGFIX: 'if not samps' raises ValueError on multi-element numpy
            # arrays; test explicitly for an empty/missing result instead.
            if samps is None or len(samps) == 0:
                return ()  # Something went wrong
            new_posts = [PosteriorOneDPDF(new_param_name, samp, injected_value=inj, trigger_values=new_trig)
                         for (new_param_name, samp, inj, new_trig) in zip(new_param_names, samps, injs, new_trigs)]
            for post in new_posts:
                if post.samples.ndim == 0:
                    print("WARNING: No posterior calculated for %s ..." % post.name)
                else:
                    self.append(post)
    return
1548
def _average_posterior(self, samples, post_name):
    """
    Return the mean of the 'post_name' column over the given samples.
    """
    return sum((samp[post_name] for samp in samples), 0.0) / len(samples)
1558
def _average_posterior_like_prior(self, samples, logl_name, prior_name, log_bias = 0):
    """
    Return the average posterior over the samples, assuming the
    'logl_name' column holds log(L) and the 'prior_name' column holds
    the (un-logged) prior.  log_bias is subtracted from log(L) before
    exponentiating to keep the values representable.
    """
    accumulated = 0.0
    for samp in samples:
        accumulated += np.exp(samp[logl_name] - log_bias) * samp[prior_name]
    return accumulated / len(samples)
1569
def _bias_factor(self):
    """
    Mean log-likelihood, used as a bias so that evidence integrals stay
    representable as doubles.
    """
    log_likelihoods = self._logL
    return np.mean(log_likelihoods)
1576
def di_evidence(self, boxing=64):
    """
    Returns the log of the direct-integration evidence for the
    posterior samples, integrating over a kD-tree built on the
    recognised coordinate columns.
    """
    allowed_coord_names=["spin1", "spin2", "a1", "phi1", "theta1", "a2", "phi2", "theta2",
                         "iota", "psi", "ra", "dec",
                         "phi_orb", "phi0", "dist", "time", "mc", "mchirp", "chirpmass", "q"]
    samples, header = self.samples()
    header = header.split()
    coord_names = [n for n in allowed_coord_names if n in header]
    tree = KDTree([PosteriorSample(row, header, coord_names) for row in samples])

    def _biased_log_integral(logl_col, prior_col):
        # Bias log(L) so the exponentials stay within double range.
        bf = self._bias_factor()
        return bf + np.log(tree.integrate(
            lambda samps: self._average_posterior_like_prior(samps, logl_col, prior_col, bf),
            boxing))

    if "prior" in header and "logl" in header:
        return _biased_log_integral("logl", "prior")
    if "prior" in header and "likelihood" in header:
        return _biased_log_integral("likelihood", "prior")
    if "post" in header:
        return np.log(tree.integrate(lambda samps: self._average_posterior(samps, "post"), boxing))
    if "posterior" in header:
        return np.log(tree.integrate(lambda samps: self._average_posterior(samps, "posterior"), boxing))
    raise RuntimeError("could not find 'post', 'posterior', 'logl' and 'prior', or 'likelihood' and 'prior' columns in output to compute direct integration evidence")
1603
1605 """Returns an approximation to the log(evidence) obtained by 1606 fitting an ellipse around the highest-posterior samples and 1607 performing the harmonic mean approximation within the ellipse. 1608 Because the ellipse should be well-sampled, this provides a 1609 better approximation to the evidence than the full-domain HM.""" 1610 allowed_coord_names=["spin1", "spin2", "a1", "phi1", "theta1", "a2", "phi2", "theta2", 1611 "iota", "psi", "ra", "dec", 1612 "phi_orb", "phi0", "dist", "time", "mc", "mchirp", "chirpmass", "q"] 1613 samples,header=self.samples() 1614 header=header.split() 1615 1616 n=int(0.05*samples.shape[0]) 1617 if not n > 1: 1618 raise IndexError 1619 1620 coord_names=[name for name in allowed_coord_names if name in header] 1621 indexes=np.argsort(self._logL[:,0]) 1622 1623 my_samples=samples[indexes[-n:], :] # The highest posterior samples. 1624 my_samples=np.array([PosteriorSample(sample,header,coord_names).coord() for sample in my_samples]) 1625 1626 mu=np.mean(my_samples, axis=0) 1627 cov=np.cov(my_samples, rowvar=0) 1628 1629 d0=None 1630 for mysample in my_samples: 1631 d=np.dot(mysample-mu, np.linalg.solve(cov, mysample-mu)) 1632 if d0 is None: 1633 d0 = d 1634 else: 1635 d0=max(d0,d) 1636 1637 ellipse_logl=[] 1638 ellipse_samples=[] 1639 for sample,logl in zip(samples, self._logL): 1640 coord=PosteriorSample(sample, header, coord_names).coord() 1641 d=np.dot(coord-mu, np.linalg.solve(cov, coord-mu)) 1642 1643 if d <= d0: 1644 ellipse_logl.append(logl) 1645 ellipse_samples.append(sample) 1646 1647 if len(ellipse_samples) > 5*n: 1648 print 'WARNING: ellpise evidence region encloses significantly more samples than %d'%n 1649 1650 ellipse_samples=np.array(ellipse_samples) 1651 ellipse_logl=np.array(ellipse_logl) 1652 1653 ndim = len(coord_names) 1654 ellipse_volume=np.pi**(ndim/2.0)*d0**(ndim/2.0)/special.gamma(ndim/2.0+1)*np.sqrt(np.linalg.det(cov)) 1655 1656 try: 1657 prior_index=header.index('prior') 1658 
pmu=np.mean(ellipse_samples[:,prior_index]) 1659 pstd=np.std(ellipse_samples[:,prior_index]) 1660 if pstd/pmu > 1.0: 1661 print 'WARNING: prior variation greater than 100\% over elliptical volume.' 1662 approx_prior_integral=ellipse_volume*pmu 1663 except KeyError: 1664 # Maybe prior = 1? 1665 approx_prior_integral=ellipse_volume 1666 1667 ll_bias=np.mean(ellipse_logl) 1668 ellipse_logl = ellipse_logl - ll_bias 1669 1670 return np.log(approx_prior_integral) - np.log(np.mean(1.0/np.exp(ellipse_logl))) + ll_bias
1671
def harmonic_mean_evidence(self):
    """
    Log of the harmonic-mean evidence estimate over the posterior samples,
    computed with a bias factor to avoid overflow in the exponentials.
    """
    bias = self._bias_factor()
    shifted_logl = self._logL - bias
    return bias + np.log(1.0 / np.mean(1.0 / np.exp(shifted_logl)))
1679
def _posMaxL(self):
    """
    Locate the sample with the maximum likelihood.  Returns
    (max log-likelihood, index of that sample); ties keep the first.
    """
    logl_vals = self._logL
    best_idx = 0
    best_logl = logl_vals[0]
    for idx in range(len(logl_vals)):
        if logl_vals[idx] > best_logl:
            best_logl = logl_vals[idx]
            best_idx = idx
    return best_logl, best_idx
1693
def _posMap(self):
    """
    Locate the maximum a posteriori sample.  Returns
    (max logL+logP, index of that sample), or None when no log-prior
    chain is stored; ties keep the first.
    """
    if self._logP is None:
        return None
    logl_vals = self._logL
    logp_vals = self._logP

    best_idx = 0
    best_pos = logl_vals[0] + logp_vals[0]
    for idx in range(len(logl_vals)):
        candidate = logl_vals[idx] + logp_vals[idx]
        if candidate > best_pos:
            best_pos = candidate
            best_idx = idx
    return best_pos, best_idx
1712
def _print_table_row(self,name,entries):
    """
    Return an HTML table-row string of the form

    <tr><td>name</td><td>item1</td><td>item2</td>...</tr>
    """
    cells = ''.join('<td>%s</td>' % entry for entry in entries)
    return '<tr><td>%s</td>' % name + cells + '</tr>'
@property
def maxL(self):
    """
    The maximum likelihood value and the parameter values of the sample
    that attains it, as (max_logl, {name: value}).
    """
    max_logl, max_i = self._posMaxL()
    params_at_maxL = {}
    for param_name in self.names:
        params_at_maxL[param_name] = self._posterior[param_name].samples[max_i][0]
    return (max_logl, params_at_maxL)
@property
def maxP(self):
    """
    The maximum a posteriori value and the parameter values of the sample
    that attains it, as (max_pos, {name: value}).
    """
    max_pos, max_i = self._posMap()
    params_at_maxP = {}
    for param_name in self.names:
        params_at_maxP[param_name] = self._posterior[param_name].samples[max_i][0]
    return (max_pos, params_at_maxP)
1751 1752
def samples(self):
    """
    Return (samples_array, header_string): an (M,N) numpy array of the
    posterior samples (M = len(self), N = dim(self)) and the tab-joined
    parameter-name header.
    """
    header_parts = []
    columns = []
    for param_name, one_pos in self:
        header_parts.append(param_name + '\t')
        columns.append(np.array(one_pos.samples))
    return np.column_stack(tuple(columns)), ''.join(header_parts)
1766
def write_to_file(self,fname):
    """
    Dump the posterior table to a file in the 'common format': a
    tab-separated header of parameter names, then one tab-separated row
    per sample.

    @param fname: path of the output file.
    """
    posterior_table, header_string = self.samples()

    # 'with' guarantees the handle is closed even if savetxt raises
    # (the original leaked the handle on error); also drops an unused
    # column_list local.
    with open(fname, 'w') as fobj:
        fobj.write(header_string + '\n')
        np.savetxt(fobj, posterior_table, delimiter='\t')
    return
1782
def gelman_rubin(self, pname):
    """
    Returns an approximation to the Gelman-Rubin statistic (see
    Gelman, A. and Rubin, D. B., Statistical Science, Vol 7,
    No. 4, pp. 457--511 (1992)) for the parameter given, accurate
    as the number of samples in each chain goes to infinity.  The
    posterior samples must have a column named 'chain' so that the
    different chains can be separated.
    """
    from numpy import seterr as np_seterr
    # Make floating-point faults (e.g. 0/0 for a fixed parameter) raise.
    np_seterr(all='raise')

    if "chain" not in self.names:
        raise RuntimeError('could not find necessary column header "chain" in posterior samples')

    chains = np.unique(self["chain"].samples)
    chain_index = self.names.index("chain")
    param_index = self.names.index(pname)
    data, header = self.samples()
    chainData = [data[data[:, chain_index] == chain, param_index] for chain in chains]
    chainMeans = [np.mean(d) for d in chainData]
    chainVars = [np.var(d) for d in chainData]
    # Between-chain variance of the means and mean within-chain variance.
    BoverN = np.var(chainMeans)
    W = np.mean(chainVars)
    sigmaHat2 = W + BoverN
    m = len(chainData)
    VHat = sigmaHat2 + BoverN / m
    try:
        R = VHat / W
    # BUGFIX: was a bare 'except:'; with seterr(all='raise') a zero W
    # surfaces as FloatingPointError (or ZeroDivisionError for plain floats).
    except (FloatingPointError, ZeroDivisionError):
        print("Error when computing Gelman-Rubin R statistic for %s.  This may be a fixed parameter" % pname)
        R = np.nan
    return R
1817
def healpix_map(self, resol, nest=True):
    """Returns a healpix map in the pixel ordering that represents the
    posterior density (per square degree) on the sky.  The pixels
    will be chosen to have at least the given resolution (in
    degrees).

    """
    # Ensure that the resolution is twice the desired
    nside = 2
    while hp.nside2resol(nside, arcmin=True) > resol * 60.0 / 2.0:
        nside *= 2

    ras = self['ra'].samples.squeeze()
    decs = self['dec'].samples.squeeze()

    # Convert declination to the colatitude healpy expects; RA is already
    # the azimuthal angle.
    thetas = np.pi / 2.0 - decs

    # Histogram the samples into pixels in ring ordering.
    inds = hp.ang2pix(nside, thetas, ras, nest=False)
    counts = np.bincount(inds)
    npix = hp.nside2npix(nside)
    if counts.shape[0] < npix:
        counts = np.concatenate((counts, np.zeros(npix - counts.shape[0])))

    # Smooth the map a bit (Gaussian sigma = resol), then normalise to a
    # density per square degree.
    hpmap = hp.sphtfunc.smoothing(counts, sigma=resol * np.pi / 180.0)
    hpmap = hpmap / (np.sum(hpmap) * hp.nside2pixarea(nside, degrees=True))

    if nest:
        hpmap = hp.reorder(hpmap, r2n=True)

    return hpmap
1852
    def __str__(self):
        """
        Define a string representation of the Posterior class ; returns
        a html formatted table of various properties of posteriors.
        """
        # Build the table as a raw HTML string first; it is re-parsed and
        # pretty-printed at the end.
        return_val='<table border="1" id="statstable"><tr><th/>'
        column_names=['maP','maxL','stdev','mean','median','stacc','injection value']
        IFOs = []
        # One extra column per trigger IFO, when triggers are present.
        if self._triggers is not None:
            IFOs = [trig.ifo for trig in self._triggers]
            for IFO in IFOs:
                column_names.append(IFO+' trigger values')

        for column_name in column_names:
            return_val+='<th>%s</th>'%column_name

        return_val+='</tr>'

        # One row per 1D marginal posterior.
        for name,oned_pos in self:

            # Index of the maximum-likelihood / maximum-posterior sample,
            # used to read off this parameter's value at those samples.
            max_logl,max_i=self._posMaxL()
            maxL=oned_pos.samples[max_i][0]
            max_post,max_j=self._posMap()
            maP=oned_pos.samples[max_j][0]
            mean=str(oned_pos.mean)
            stdev=str(oned_pos.stdev)
            median=str(np.squeeze(oned_pos.median))
            stacc=str(oned_pos.stacc)
            injval=str(oned_pos.injval)
            trigvals=oned_pos.trigvals

            row = [maP,maxL,stdev,mean,median,stacc,injval]
            if self._triggers is not None:
                for IFO in IFOs:
                    try:
                        row.append(str(trigvals[IFO]))
                    except TypeError:
                        # trigvals is None for parameters without trigger
                        # values; presumably _print_table_row renders a
                        # None cell — TODO confirm.
                        row.append(None)
            return_val+=self._print_table_row(name,row)

        return_val+='</table>'

        # Round-trip through ElementTree + minidom purely to pretty-print
        # the generated HTML.
        parser=XMLParser()
        parser.feed(return_val)
        Estr=parser.close()

        elem=Estr
        rough_string = tostring(elem, 'utf-8')
        reparsed = minidom.parseString(rough_string)
        return_val=reparsed.toprettyxml(indent=" ")
        # Strip the XML declaration line that minidom prepends.
        return return_val[len('<?xml version="1.0" ?>')+1:]
1904
1905 - def write_vot_info(self):
1906 """ 1907 Writes the information stored in the VOTtree if there is one 1908 """ 1909 target=VOT2HTML() 1910 parser=XMLParser(target=target) 1911 parser.feed(self._votfile) 1912 return parser.close()
1913 1914 #=============================================================================== 1915 # Functions used to parse injection structure. 1916 #===============================================================================
1917 - def _inj_m1(self,inj):
1918 """ 1919 Return the mapping of (mchirp,eta)->m1; m1>m2 i.e. return the greater of the mass 1920 components (m1) calculated from the chirp mass and the symmetric mass ratio. 1921 1922 @type inj: glue.ligolw.lsctables.SimInspiral 1923 @param inj: a custom type with the attributes 'mchirp' and 'eta'. 1924 @rtype: number 1925 """ 1926 (mass1,mass2)=mc2ms(inj.mchirp,inj.eta) 1927 return mass1
1928
1929 - def _inj_m2(self,inj):
1930 """ 1931 Return the mapping of (mchirp,eta)->m2; m1>m2 i.e. return the lesser of the mass 1932 components (m2) calculated from the chirp mass and the symmetric mass ratio. 1933 1934 @type inj: glue.ligolw.lsctables.SimInspiral 1935 @param inj: a custom type with the attributes 'mchirp' and 'eta'. 1936 @rtype: number 1937 """ 1938 (mass1,mass2)=mc2ms(inj.mchirp,inj.eta) 1939 return mass2
1940
1941 - def _inj_q(self,inj):
1942 """ 1943 Return the mapping of (mchirp,eta)->q; m1>m2 i.e. return the mass ratio q=m2/m1. 1944 1945 @type inj: glue.ligolw.lsctables.SimInspiral 1946 @param inj: a custom type with the attributes 'mchirp' and 'eta'. 1947 @rtype: number 1948 """ 1949 (mass1,mass2)=mc2ms(inj.mchirp,inj.eta) 1950 return mass2/mass1
1951
1952 - def _inj_longitude(self,inj):
1953 """ 1954 Return the mapping of longitude found in inj to the interval [0,2*pi). 1955 1956 @type inj: glue.ligolw.lsctables.SimInspiral 1957 @param inj: a custom type with the attribute 'longitude'. 1958 @rtype: number 1959 """ 1960 if inj.longitude>2*pi_constant or inj.longitude<0.0: 1961 maplong=2*pi_constant*(((float(inj.longitude))/(2*pi_constant)) - floor(((float(inj.longitude))/(2*pi_constant)))) 1962 print "Warning: Injected longitude/ra (%s) is not within [0,2\pi)! Angles are assumed to be in radians so this will be mapped to [0,2\pi). Mapped value is: %s."%(str(inj.longitude),str(maplong)) 1963 return maplong 1964 else: 1965 return inj.longitude
1966
    def _inj_spins(self, inj, frame='OrbitalL'):
        """
        Return a dictionary of spin-related injected quantities derived
        from the injection row: spin magnitudes/angles (a1, theta1,
        phi1, a2, theta2, phi2), inclination iota, effective spin
        'spinchi', tilt angles, beta, theta_jn, phi_jl and phi12.
        Returns an empty dict when inj is falsy.

        @param inj: injection row with mass/spin/inclination attributes
            (e.g. glue.ligolw.lsctables.SimInspiral).
        @param frame: frame-axis name passed to
            lalsim.SimInspiralGetFrameAxisFromString.
        """
        spins = {}
        f_ref = self._injFref

        if not inj:
            spins = {}

        else:
            axis = lalsim.SimInspiralGetFrameAxisFromString(frame)

            m1, m2 = inj.mass1, inj.mass2
            mc, eta = inj.mchirp, inj.eta

            # Convert to radiation frame
            iota, s1x, s1y, s1z, s2x, s2y, s2z = \
                lalsim.SimInspiralInitialConditionsPrecessingApproxs(inj.inclination,
                                                                     inj.spin1x, inj.spin1y, inj.spin1z,
                                                                     inj.spin2x, inj.spin2y, inj.spin2z,
                                                                     m1*lal.MSUN_SI, m2*lal.MSUN_SI, f_ref, inj.coa_phase, axis)

            # Spherical decomposition of each spin vector.
            a1, theta1, phi1 = cart2sph(s1x, s1y, s1z)
            a2, theta2, phi2 = cart2sph(s2x, s2y, s2z)

            spins = {'a1':a1, 'theta1':theta1, 'phi1':phi1,
                     'a2':a2, 'theta2':theta2, 'phi2':phi2,
                     'iota':iota}

            # If spins are aligned, save the sign of the z-component
            if inj.spin1x == inj.spin1y == inj.spin2x == inj.spin2y == 0.:
                spins['a1z'] = inj.spin1z
                spins['a2z'] = inj.spin2z

            L = orbital_momentum(f_ref, m1,m2, iota)
            S1 = np.hstack((s1x, s1y, s1z))
            S2 = np.hstack((s2x, s2y, s2z))

            # Aligned spin components along the z (orbital) axis.
            zhat = np.array([0., 0., 1.])
            aligned_comp_spin1 = array_dot(S1, zhat)
            aligned_comp_spin2 = array_dot(S2, zhat)
            chi = aligned_comp_spin1 + aligned_comp_spin2 + \
                  np.sqrt(1. - 4.*eta) * (aligned_comp_spin1 - aligned_comp_spin2)

            spins['spinchi'] = chi

            # Scale the unit-mass spins to physical spin angular momenta
            # before forming the total angular momentum J = L + S1 + S2.
            S1 *= m1**2
            S2 *= m2**2
            J = L + S1 + S2

            tilt1 = array_ang_sep(L, S1)
            tilt2 = array_ang_sep(L, S2)
            beta = array_ang_sep(J, L)

            spins['tilt1'] = tilt1
            spins['tilt2'] = tilt2
            spins['beta'] = beta

            # Undo the rotations of XLALSimInspiralTransformPrecessing...
            # in inverse order to express everything in the L frame.
            # First rotation: bring J into the N-x plane, with negative
            # x component.
            phi0 = np.arctan2(J[1], J[0])
            phi0 = np.pi - phi0

            J = ROTATEZ(phi0, J[0], J[1], J[2])
            L = ROTATEZ(phi0, L[0], L[1], L[2])
            S1 = ROTATEZ(phi0, S1[0], S1[1], S1[2])
            S2 = ROTATEZ(phi0, S2[0], S2[1], S2[2])

            # Now J is in the N-x plane and forms an angle theta_jn with
            # N; rotate about y so that J lies along z.
            theta_jn = array_polar_ang(J)
            spins['theta_jn'] = theta_jn

            J = ROTATEY(theta_jn, J[0], J[1], J[2])
            L = ROTATEY(theta_jn, L[0], L[1], L[2])
            S1 = ROTATEY(theta_jn, S1[0], S1[1], S1[2])
            S2 = ROTATEY(theta_jn, S2[0], S2[1], S2[2])

            # J should now be || z and L should have an azimuthal angle
            # phi_jl.
            phi_jl = np.arctan2(L[1], L[0])
            phi_jl = np.pi - phi_jl
            spins['phi_jl'] = phi_jl

            # Bring L into the Z-X plane, with negative x.
            J = ROTATEZ(phi_jl, J[0], J[1], J[2])
            L = ROTATEZ(phi_jl, L[0], L[1], L[2])
            S1 = ROTATEZ(phi_jl, S1[0], S1[1], S1[2])
            S2 = ROTATEZ(phi_jl, S2[0], S2[1], S2[2])

            theta0 = array_polar_ang(L)
            J = ROTATEY(theta0, J[0], J[1], J[2])
            L = ROTATEY(theta0, L[0], L[1], L[2])
            S1 = ROTATEY(theta0, S1[0], S1[1], S1[2])
            S2 = ROTATEY(theta0, S2[0], S2[1], S2[2])

            # A final rotation is unnecessary: it would not change the
            # difference between the spins' azimuthal angles.
            phi1 = np.arctan2(S1[1], S1[0])
            phi2 = np.arctan2(S2[1], S2[0])
            # Wrap phi12 = phi2 - phi1 into [0, 2*pi).
            if phi2 < phi1:
                phi12 = phi2 - phi1 + 2.*np.pi
            else:
                phi12 = phi2 - phi1
            spins['phi12'] = phi12

        return spins
2069
class BurstPosterior(Posterior):
    """
    Data structure for a table of posterior samples from a burst
    (unmodelled) analysis; specialises Posterior for SimBurst
    injections and SnglBurst triggers.
    """
    def __init__(self,commonResultsFormatData,SimBurstTableEntry=None,injFref=None,SnglBurstList=None,name=None,description=None,votfile=None):
        """
        Constructor.

        @param commonResultsFormatData: A 2D array containing the posterior
            samples and related data. The samples chains form the columns.
        @type commonResultsFormatData: custom type
        @param SimBurstTableEntry: A SimBurst table row containing the injected values.
        @type SimBurstTableEntry: glue.ligolw.lsctables.SimBurst
        @param injFref: reference frequency associated with the injection.
        @param SnglBurstList: A list of SnglBurst objects containing the triggers.
        @type SnglBurstList: list
        @param name: optional name for this posterior.
        @param description: optional description for this posterior.
        @param votfile: optional VOTable content associated with this run.
        """
        common_output_table_header,common_output_table_raw =commonResultsFormatData
        self._posterior={}
        self._injFref=injFref
        self._injection=SimBurstTableEntry
        self._triggers=SnglBurstList
        # Column names accepted as (log-)likelihood / posterior values.
        self._loglaliases=['posterior', 'logl','logL','likelihood', 'deltalogl']
        # Column names accepted as (log-)prior values.
        self._logpaliases=['logp', 'logP','prior','logprior','Prior','logPrior']

        self._votfile=votfile

        # Parameter-name handling is case-insensitive throughout.
        common_output_table_header=[i.lower() for i in common_output_table_header]

        # Map sampled parameter names onto accessors of the SimBurst row,
        # so injected values can be attached to each 1D posterior.
        self._injXMLFuncMap={
                            'f0':lambda inj:inj.frequency,
                            'frequency':lambda inj:inj.frequency,
                            'centre_frequency':lambda inj:inj.frequency,
                            'quality':lambda inj:inj.q,
                            'hrss':lambda inj:inj.hrss,
                            'loghrss':lambda inj:log(inj.hrss),
                            'polar_angle':lambda inj:inj.pol_ellipse_angle,
                            'pol_ellipse_angle':lambda inj:inj.pol_ellipse_angle,
                            'pol_ellipse_e':lambda inj:inj.pol_ellipse_e,
                            'alpha':lambda inj:inj.pol_ellipse_angle,
                            'polar_eccentricity':lambda inj:inj.pol_ellipse_e,
                            'eccentricity':lambda inj:inj.pol_ellipse_e,
                            'time': lambda inj:float(inj.get_end()),
                            'end_time': lambda inj:float(inj.get_end()),
                            'ra':self._inj_longitude,
                            'rightascension':self._inj_longitude,
                            'long':self._inj_longitude,
                            'longitude':self._inj_longitude,
                            'dec':lambda inj:inj.dec,
                            'declination':lambda inj:inj.dec,
                            'lat':lambda inj:inj.dec,
                            'latitude':lambda inj:inj.dec,
                            'psi': lambda inj: np.mod(inj.psi, np.pi),
                            'f_ref': lambda inj: self._injFref,
                            'polarisation':lambda inj:inj.psi,
                            'polarization':lambda inj:inj.psi,
                            'duration':lambda inj:inj.duration,
                            'h1_end_time':lambda inj:float(inj.get_end('H')),
                            'l1_end_time':lambda inj:float(inj.get_end('L')),
                            'v1_end_time':lambda inj:float(inj.get_end('V')),
                            }

        # Build one PosteriorOneDPDF per column, attaching injected and
        # trigger values where available.
        for one_d_posterior_samples,param_name in zip(np.hsplit(common_output_table_raw,common_output_table_raw.shape[1]),common_output_table_header):

            self._posterior[param_name]=PosteriorOneDPDF(param_name.lower(),one_d_posterior_samples,injected_value=self._getinjpar(param_name),injFref=self._injFref,trigger_values=self._gettrigpar(param_name))

        # Locate the likelihood/posterior column under any accepted alias.
        logLFound=False

        for loglalias in self._loglaliases:
            if loglalias in common_output_table_header:
                try:
                    self._logL=self._posterior[loglalias].samples
                except KeyError:
                    print "No '%s' column in input table!"%loglalias
                    continue
                logLFound=True

        if not logLFound:
            raise RuntimeError("No likelihood/posterior values found!")

        # Locate the prior column (optional); exponentiated aliases are
        # converted to log form.
        self._logP=None
        for logpalias in self._logpaliases:
            if logpalias in common_output_table_header:
                try:
                    self._logP=self._posterior[logpalias].samples
                except KeyError:
                    print "No '%s' column in input table!"%logpalias
                    continue
                if not 'log' in logpalias:
                    self._logP=[np.log(i) for i in self._logP]
        if name is not None:
            self.__name=name

        if description is not None:
            self.__description=description

        return
    #===========================================================================
    # Functions used to parse injection structure.
    #===========================================================================

    def _inj_longitude(self,inj):
        """
        Return the injected right ascension mapped into the interval
        [0,2*pi).

        @type inj: glue.ligolw.lsctables.SimBurst
        @param inj: a custom type with the attribute 'ra'.
        @rtype: number
        """
        if inj.ra>2*pi_constant or inj.ra<0.0:
            maplong=2*pi_constant*(((float(inj.ra)/(2*pi_constant)) - floor(((float(inj.ra))/(2*pi_constant)))))
            print "Warning: Injected longitude/ra (%s) is not within [0,2\pi)! Angles are assumed to be in radians so this will be mapped to [0,2\pi). Mapped value is: %s."%(str(inj.ra),str(maplong))
            return maplong
        else:
            return inj.ra
2185
class KDTree(object):
    """
    A kD-tree over objects exposing a coord() method.  Each node stores
    its objects and bounding box; internal nodes split their objects at
    the median coordinate of the longest bounding-box dimension into
    _left and _right subtrees.
    """
    def __init__(self, objects):
        """
        Construct a kD-tree from a sequence of objects.  Each object
        should return its coordinates using obj.coord().

        @raise RuntimeError: if objects is empty.
        """
        if len(objects) == 0:
            raise RuntimeError("cannot construct kD-tree out of zero objects---you may have a repeated sample in your list")
        elif len(objects) == 1:
            # Leaf with a single object: the bounding box degenerates to
            # a point.
            self._objects = objects[:]
            coord=self._objects[0].coord()
            self._bounds = coord,coord
        elif self._same_coords(objects):
            # All the same coordinates: collapse to one representative
            # object, otherwise the median split below would never make
            # progress.
            self._objects = [ objects[0] ]
            coord=self._objects[0].coord()
            self._bounds = coord,coord
        else:
            # Internal node: split at the median coordinate along the
            # longest bounding-box dimension.
            self._objects = objects[:]
            self._bounds = self._bounds_of_objects()
            low,high=self._bounds
            self._split_dim=self._longest_dimension()
            longest_dim = self._split_dim
            sorted_objects=sorted(self._objects, key=lambda obj: (obj.coord())[longest_dim])
            N = len(sorted_objects)
            # NOTE: N/2 relies on Python 2 integer division.
            bound=0.5*(sorted_objects[N/2].coord()[longest_dim] + sorted_objects[N/2-1].coord()[longest_dim])
            low = [obj for obj in self._objects if obj.coord()[longest_dim] < bound]
            high = [obj for obj in self._objects if obj.coord()[longest_dim] >= bound]
            if len(low)==0:
                # Then there must be multiple values with the same
                # coordinate as the minimum element of high
                low = [obj for obj in self._objects if obj.coord()[longest_dim]==bound]
                high = [obj for obj in self._objects if obj.coord()[longest_dim] > bound]
            self._left = KDTree(low)
            self._right = KDTree(high)

    def _same_coords(self, objects):
        """
        True if and only if all the given objects have the same
        coordinates.
        """
        if len(objects) <= 1:
            return True
        coords = [obj.coord() for obj in objects]
        c0 = coords[0]
        for ci in coords[1:]:
            if not np.all(ci == c0):
                return False
        return True

    def _bounds_of_objects(self):
        """
        Element-wise (low, high) bounds of the objects contained in the
        tree.
        """
        low=self._objects[0].coord()
        high=self._objects[0].coord()
        for obj in self._objects[1:]:
            low=np.minimum(low,obj.coord())
            high=np.maximum(high,obj.coord())
        return low,high

    def _longest_dimension(self):
        """
        Index of the longest dimension of the tree bounds.
        """
        low,high = self._bounds
        widths = high-low
        return np.argmax(widths)

    def objects(self):
        """
        Returns (a shallow copy of) the objects in the tree.
        """
        return self._objects[:]

    def __iter__(self):
        """
        Iterator over all the objects contained in the tree.
        """
        return self._objects.__iter__()

    def left(self):
        """
        Returns the left subtree (only set on internal nodes).
        """
        return self._left

    def right(self):
        """
        Returns the right subtree (only set on internal nodes).
        """
        return self._right

    def split_dim(self):
        """
        Returns the dimension along which this level of the kD-tree
        splits.
        """
        return self._split_dim

    def bounds(self):
        """
        Returns the coordinates of the lower-left and upper-right
        corners of the bounding box for this tree: low_left, up_right
        """
        return self._bounds

    def volume(self):
        """
        Returns the volume of the bounding box of the tree.
        """
        v = 1.0
        low,high=self._bounds
        for l,h in zip(low,high):
            v = v*(h - l)
        return v

    def integrate(self,f,boxing=64):
        """
        Returns the integral of f(objects) over the tree.  The
        optional boxing parameter determines how deep to descend into
        the tree before computing f.
        """
        def x(tree):
            # Treat each box as constant density: volume times f of its
            # contents.
            return tree.volume()*f(tree._objects)

        def y(a,b):
            return a+b

        return self.operate(x,y,boxing=boxing)

    def operate(self,f,g,boxing=64):
        """
        Apply f to nodes holding at most boxing objects; on larger
        nodes, recurse and combine child results with g.
        """
        if len(self._objects) <= boxing:
            return f(self)
        else:

            return g(self._left.operate(f,g,boxing),self._right.operate(f,g,boxing))
2334
class KDTreeVolume(object):
    """
    A kD-tree suitable for splitting parameter spaces and counting hypervolumes.
    Is modified from the KDTree class so that bounding boxes are stored. This means that
    there are no longer gaps in the hypervolume once the samples have been split into groups.
    Unlike KDTree, the split dimension simply cycles (dims, dims+1, ...)
    rather than tracking the longest bounding-box edge.
    """
    def __init__(self, objects,boundingbox,dims=0):
        """
        Construct a kD-tree from a sequence of objects. Each object
        should return its coordinates using obj.coord().
        the obj should also store the bounds of the hypervolume its found in.
        for non-leaf objects we need the name of the dimension split and value at split.

        @param objects: sequence of samples with a coord() method.
        @param boundingbox: (low, high) bounds of the hypervolume this
            node covers; children inherit it, split at the median.
        @param dims: index of the dimension this node splits along.
        """
        self._dimension = dims
        self._bounds = boundingbox
        # Number of identical samples collapsed into this node (leaves only).
        self._weight = 1
        if len(objects) == 0: #for no objects - something is wrong, i think it can only happen in first call
            raise RuntimeError("cannot construct kD-tree out of zero objects---you may have a repeated sample in your list")
        elif len(objects) == 1: #1 object, have reached leaf of tree
            self._objects = objects[:]
        elif self._same_coords(objects): # When ALL samples have the same coordinates in all dimensions
            self._weight = len(objects)
            self._objects = [ objects[0] ] #need to modify kdtree_bin functions to use _weight to get correct number of samples
            coord=self._objects[0].coord()
        else: #construct next level of tree with multiple samples
            self._objects = objects[:]
            split_dim = self._dimension
            sorted_objects=sorted(self._objects, key=lambda obj: (obj.coord())[split_dim])
            N = len(sorted_objects)
            # NOTE: N/2 relies on Python 2 integer division.
            self._split_value = 0.5*(sorted_objects[N/2].coord()[split_dim] + sorted_objects[N/2-1].coord()[split_dim])
            bound = self._split_value
            low = [obj for obj in self._objects if obj.coord()[split_dim] < bound]
            high = [obj for obj in self._objects if obj.coord()[split_dim] >= bound]
            if len(low)==0:
                # Then there must be multiple values with the same
                # coordinate as the minimum element of 'high'
                low = [obj for obj in self._objects if obj.coord()[split_dim] == bound]
                high = [obj for obj in self._objects if obj.coord()[split_dim] > bound]
            # Children share this node's bounding box, cut at the split value.
            leftBoundingbox = []
            rightBoundingbox = []
            for i in self._bounds:
                leftBoundingbox.append(list(i))
                rightBoundingbox.append(list(i))
            leftBoundingbox[1][split_dim] = bound
            rightBoundingbox[0][split_dim] = bound
            # designate the next dimension to use for split for sub-trees
            # if has got to the end of the list of dimensions then starts
            # again at dimension = 0
            if (split_dim < (len(self._objects[0].coord()) - 1)):
                child_dim = split_dim + 1
            else:
                child_dim = 0
            self._left = KDTreeVolume(low,leftBoundingbox,dims = child_dim)
            # added in a load of messing about incase there are repeated values in the currently checked dimension
            # NOTE(review): when high is empty, self._right stays None and
            # search()/operate() on this node can raise AttributeError —
            # verify against callers.
            if (len(high) != 0):
                self._right = KDTreeVolume(high,rightBoundingbox,dims = child_dim)
            else:
                self._right = None

    def _same_coords(self, objects):
        """
        True if and only if all the given objects have the same
        coordinates.
        """
        if len(objects) <= 1:
            return True
        coords = [obj.coord() for obj in objects]
        c0 = coords[0]
        for ci in coords[1:]:
            if not np.all(ci == c0):
                return False
        return True

    def objects(self):
        """
        Returns (a shallow copy of) the objects in the tree.
        """
        return self._objects[:]

    def __iter__(self):
        """
        Iterator over all the objects contained in the tree.
        """
        return self._objects.__iter__()

    def left(self):
        """
        Returns the left tree.
        """
        return self._left

    def right(self):
        """
        Returns the right tree.
        """
        return self._right

    def split_dim(self):
        """
        Returns the dimension along which this level of the kD-tree
        splits.

        NOTE(review): self._split_dim is never assigned in this class
        (the constructor sets self._dimension instead), so calling this
        raises AttributeError — it likely should return self._dimension.
        """
        return self._split_dim

    def bounds(self):
        """
        Returns the coordinates of the lower-left and upper-right
        corners of the bounding box for this tree: low_left, up_right
        """
        return self._bounds

    def volume(self):
        """
        Returns the volume of the bounding box of the tree.
        """
        v = 1.0
        low,high=self._bounds
        for l,h in zip(low,high):
            v = v*(h - l)
        return v

    def integrate(self,f,boxing=64):
        """
        Returns the integral of f(objects) over the tree. The
        optional boxing parameter determines how deep to descend into
        the tree before computing f.
        """
        def x(tree):
            # Treat each box as constant density: volume times f of its
            # contents.
            return tree.volume()*f(tree._objects)

        def y(a,b):
            return a+b

        return self.operate(x,y,boxing=boxing)

    def operate(self,f,g,boxing=64):
        """
        Apply f to nodes holding at most boxing objects; on larger
        nodes, recurse and combine child results with g.
        """
        if len(self._objects) <= boxing:
            return f(self)
        else:
            return g(self._left.operate(f,g,boxing),self._right.operate(f,g,boxing))

    def search(self,coordinates,boxing = 64):
        """
        takes a set of coordinates and searches down through the tree untill it gets
        to a box with less than 'boxing' objects in it and returns the box bounds,
        number of objects in the box, and the weighting.
        """
        if len(self._objects) <= boxing:
            return self._bounds,len(self._objects),self._weight
        elif coordinates[self._dimension] < self._split_value:
            return self._left.search(coordinates,boxing)
        else:
            return self._right.search(coordinates,boxing)

    def fillNewTree(self,boxing = 64, isArea = False):
        """
        copies tree structure, but with KDSkeleton as the new nodes.

        @param boxing: depth cutoff — nodes with at most this many
            objects become skeleton leaves.
        @param isArea: if True use skyArea(bounds) instead of volume()
            when computing each node's importance.
        """
        boxN = boxing
        if len(self._objects) <= boxN:
            newNode = KDSkeleton(self.bounds(), left_child = None , right_child = None)
            if isArea:
                newNode.setImportance(len(self._objects),skyArea(self.bounds()))
            else:
                newNode.setImportance(len(self._objects),self.volume())
            return newNode
        else:
            # NOTE(review): the two branches below pass self.bounds (the
            # bound method object) rather than self.bounds() as in the
            # leaf branch above — looks like a bug; confirm against
            # consumers of KDSkeleton.bounds().
            if isArea:
                newNode = KDSkeleton(self.bounds, left_child = self._left.fillNewTree(boxN,isArea=True), right_child = self._right.fillNewTree(boxN,isArea=True))
                newNode.setImportance(len(self._objects),skyArea(self.bounds()))
            else:
                newNode = KDSkeleton(self.bounds, left_child = self._left.fillNewTree(boxN), right_child = self._right.fillNewTree(boxN))
                newNode.setImportance(len(self._objects),self.volume())
            newNode.setSplit(self._dimension,self._split_value)
            return newNode
2514
class KDSkeleton(object):
    """
    Lightweight record of a kD-tree's structure: bounding box, optional
    children, split information, a sample counter, and an importance
    weight (samples per unit volume) set via setImportance().
    """

    def __init__(self, bounding_box, left_child = None, right_child = None):
        self._bounds = bounding_box
        self._left = left_child
        self._right = right_child
        self._samples = 0
        self._splitValue = None
        self._splitDim = None
        self._importance = None
        self._volume = None

    def addSample(self):
        # Count one more sample as falling inside this node.
        self._samples += 1

    def bounds(self):
        return self._bounds

    def search(self, coordinates):
        """
        Descend to the leaf containing *coordinates* and return that
        leaf's (bounds, sample count, importance).
        """
        # A node without a left child is a leaf.
        if self._left is None:
            return self._bounds, self._samples, self._importance
        branch = self._left if coordinates[self._splitDim] < self._splitValue else self._right
        return branch.search(coordinates)

    def setImportance(self, sampleNumber, volume):
        """Record the node's volume and importance = samples/volume."""
        self._volume = volume
        self._importance = sampleNumber / volume

    def setSplit(self, dimension, value):
        """Record the dimension index and coordinate value of the split."""
        self._splitDim = dimension
        self._splitValue = value
2557
2558 2559 -class PosteriorSample(object):
2560 """ 2561 A single parameter sample object, suitable for inclusion in a 2562 kD-tree. 2563 """ 2564
2565 - def __init__(self, sample_array, headers, coord_names):
2566 """ 2567 Given the sample array, headers for the values, and the names 2568 of the desired coordinates, construct a parameter sample 2569 object. 2570 """ 2571 self._samples=sample_array[:] 2572 self._headers=headers 2573 if not (len(sample_array) == len(self._headers)): 2574 print "Header length = ", len(self._headers) 2575 print "Sample length = ", len(sample_array) 2576 raise RuntimeError("parameter and sample lengths do not agree") 2577 self._coord_names=coord_names 2578 self._coord_indexes=[self._headers.index(name) for name in coord_names]
2579
2580 - def __getitem__(self, key):
2581 """ 2582 Return the element with the corresponding name. 2583 """ 2584 key=key.lower() 2585 if key in self._headers: 2586 idx=self._headers.index(key) 2587 return self._samples[idx] 2588 else: 2589 raise KeyError("key not found in posterior sample: %s"%key)
2590
2591 - def coord(self):
2592 """ 2593 Return the coordinates for the parameter sample. 2594 """ 2595 return self._samples[self._coord_indexes]
2596
class AnalyticLikelihood(object):
    """
    Return analytic likelihood values built as an equal-weight mixture
    of independent Gaussians, one mode per (covariance matrix, mean
    vector file) pair.
    """

    def __init__(self, covariance_matrix_files, mean_vector_files):
        """
        Prepare analytic likelihood for the given parameters.

        @param covariance_matrix_files: CSV file (or list of files)
            holding one covariance matrix each; only the diagonal is
            used, as per-parameter variances.
        @param mean_vector_files: CSV file (or list of files, one per
            covariance matrix) whose first line names the parameters
            and whose remaining lines hold one mean vector per mode.
        """
        # Make sure files names are in a list
        if isinstance(covariance_matrix_files, str):
            covariance_matrix_files = [covariance_matrix_files]
        if isinstance(mean_vector_files, str):
            mean_vector_files = [mean_vector_files]

        covarianceMatrices = [np.loadtxt(csvFile, delimiter=',') for csvFile in covariance_matrix_files]
        num_matrices = len(covarianceMatrices)

        if num_matrices != len(mean_vector_files):
            raise RuntimeError('Must give a mean vector list for every covariance matrix')

        # Parameter names come from the header line of the first file.
        param_line = open(mean_vector_files[0]).readline()
        self._params = [param.strip() for param in param_line.split(',')]

        # SECURITY NOTE: this converter eval()s cell contents so entries
        # like '3.0*pi/2.0' parse; do not feed it untrusted files.
        converter=lambda x: eval(x.replace('pi','%.32f'%pi_constant)) # converts fractions w/ pi (e.g. 3.0*pi/2.0)
        self._modes = []
        for i in range(num_matrices):
            CM = covarianceMatrices[i]
            vecFile = mean_vector_files[i]

            # Every mean-vector file must describe the same parameter set.
            param_line = open(vecFile).readline()
            params = [param.strip() for param in param_line.split(',')]
            if set(params)!=set(self._params):
                raise RuntimeError('Parameters do not agree between mean vector files.')

            # Per-parameter standard deviations from the covariance diagonal.
            sigmas = dict(zip(params,np.sqrt(CM.diagonal())))
            colNums = range(len(params))
            converters = dict(zip(colNums,[converter for i in colNums]))
            meanVectors = np.loadtxt(vecFile, delimiter=',', skiprows=1, converters=converters)
            try:
                # Multiple mean vectors: one Gaussian mode per row.
                for vec in meanVectors:
                    means = dict(zip(params,vec))
                    mode = [(param, stats.norm(loc=means[param],scale=sigmas[param])) for param in params]
                    self._modes.append(dict(mode))
            except TypeError:
                # Single mean vector: loadtxt returned a 1D array whose
                # rows are scalars, so iteration unpacks wrongly.
                means = dict(zip(params,meanVectors))
                mode = [(param, stats.norm(loc=means[param],scale=sigmas[param])) for param in params]
                self._modes.append(dict(mode))
        self._num_modes = len(self._modes)

    def pdf(self, param):
        """
        Return the PDF function (equal-weight mixture over modes) for
        the given parameter, or None if the parameter is unknown.
        """
        pdf = None
        if param in self._params:
            pdf = lambda x: (1.0/self._num_modes) * sum([mode[param].pdf(x) for mode in self._modes])
        return pdf

    def cdf(self, param):
        """
        Return the CDF function (equal-weight mixture over modes) for
        the given parameter, or None if the parameter is unknown.
        """
        cdf = None
        if param in self._params:
            cdf = lambda x: (1.0/self._num_modes) * sum([mode[param].cdf(x) for mode in self._modes])
        return cdf

    @property
    def names(self):
        """
        Return list of parameter names described by analytic likelihood function.
        """
        return self._params
2675
class htmlChunk(object):
    """
    Base class wrapping an ElementTree element, used for assembling web
    content.  Helper methods create child elements (paragraphs,
    headings, links, tables, ...) and return them for further nesting.
    """
    def __init__(self, tag, attrib=None, parent=None):
        self._html = Element(tag)
        if attrib:
            for attr_name, attr_value in attrib.items():
                self._html.attrib[attr_name] = attr_value
        if parent:
            parent.append(self._html)

    def _append_tag(self, tag, text=None):
        # Internal helper: create <tag>, optionally set its text,
        # attach it to this chunk, and return it.
        node = Element(tag)
        if text is not None:
            node.text = text
        self._html.append(node)
        return node

    def toprettyxml(self):
        """
        Return a pretty-printed XML string of the htmlPage.
        """
        rough = tostring(self._html)
        return minidom.parseString(rough).toprettyxml(indent=" ")

    def __str__(self):
        return self.toprettyxml()

    def write(self, string):
        """Parse *string* as XML and attach the resulting element."""
        feed_parser = XMLParser()
        feed_parser.feed(string)
        self._html.append(feed_parser.close())

    def p(self, pstring):
        return self._append_tag('p', pstring)

    def h1(self, h1string):
        return self._append_tag('h1', h1string)

    def h5(self, h1string):
        return self._append_tag('h5', h1string)

    def h2(self, h2string):
        return self._append_tag('h2', h2string)

    def h3(self, h1string):
        return self._append_tag('h3', h1string)

    def br(self):
        return self._append_tag('br')

    def hr(self):
        return self._append_tag('hr')

    def a(self, url, linktext):
        anchor = self._append_tag('a', linktext)
        anchor.attrib['href'] = url
        return anchor

    def tab(self, idtable=None):
        attrs = {} if idtable is None else {'id': idtable}
        table = Element('table', attrs)
        self._html.append(table)
        return table

    def insert_row(self, tab, label=None):
        """
        Insert row in table tab.
        If given, label used as id for the table tag
        """
        row = Element('tr')
        if label is not None:
            row.attrib['id'] = label
        tab.append(row)
        return row

    def insert_td(self, row, td, label=None, legend=None):
        """
        Insert cell td into row row.
        Sets id to label, if given
        """
        cell = Element('td')
        if type(td) is str:
            cell.text = td
        else:
            # Non-string content: serialise the element tree and embed
            # the pretty-printed markup as text.
            cell.text = minidom.parseString(tostring(td)).toprettyxml(indent=" ")
        if label is not None:
            cell.attrib['id'] = label
        if legend is not None:
            legend.a('#%s' % label, '%s' % label)
            legend.br()
        row.append(cell)
        return cell

    def append(self, element):
        self._html.append(element)
2807
class htmlPage(htmlChunk):
    """
    A concrete class for generating an XHTML(1) document. Inherits from htmlChunk.
    """
    def __init__(self,title=None,css=None,javascript=None,toc=False):
        """
        Build the html/head/body skeleton.

        @param title: page title; also rendered as an h1 in the body.
        @param css: optional stylesheet text placed in a <style> element.
        @param javascript: optional script text placed in a <script> element.
        @param toc: unused; kept for interface compatibility.
        """
        htmlChunk.__init__(self,'html',attrib={'xmlns':"http://www.w3.org/1999/xhtml"})
        self.doctype_str='<!DOCTYPE html PUBLIC "-//W3C//DTD XHTML 1.0 Transitional//EN" "http://www.w3.org/TR/xhtml1/DTD/xhtml1-transitional.dtd">'

        self._head=SubElement(self._html,'head')
        Etitle=SubElement(self._head,'title')
        self._body=SubElement(self._html,'body')
        self._css=None
        self._jscript=None
        if title is not None:
            Etitle.text=str(title)
            self._title=SubElement(self._body,'h1')
            self._title.text=title
        if javascript is not None:
            self._jscript=SubElement(self._head,'script')
            self._jscript.attrib['type']="text/javascript"
            self._jscript.text=str(javascript)
        if css is not None:
            self._css=SubElement(self._head,'style')
            self._css.attrib['type']="text/css"
            self._css.text=str(css)

    def __str__(self):
        # Prepend the XHTML doctype to the pretty-printed tree.
        return self.doctype_str+'\n'+self.toprettyxml()

    def add_section(self,section_name,legend=None):
        """
        Append a new htmlSection to the body; optionally add a link to
        it in *legend*.
        """
        newSection=htmlSection(section_name)
        self._body.append(newSection._html)
        if legend is not None:
            legend.a('#%s'%section_name,'%s'%section_name)
            legend.br()
        return newSection

    def add_collapse_section(self,section_name,legend=None,innertable_id=None,start_closed=True):
        """
        Create a section embedded into a table that can be collapsed with a button
        """
        newSection=htmlCollapseSection(section_name,table_id=innertable_id,start_closed=start_closed)
        self._body.append(newSection._html)
        if legend is not None:
            legend.a('#%s'%section_name,'%s'%section_name)
            legend.br()
        return newSection

    def add_section_to_element(self,section_name,parent):
        """
        Create a section which is not appended to the body of html, but to the parent Element
        """
        newSection=htmlSection(section_name,htmlElement=parent,blank=True)
        parent.append(newSection._html)
        return newSection

    # BUG FIX: these property getters were declared as "def body():" /
    # "def head():" with no self parameter, so accessing page.body or
    # page.head raised a TypeError (and referenced an undefined "self").
    @property
    def body(self):
        """The document <body> element."""
        return self._body

    @property
    def head(self):
        """The document <head> element."""
        return self._head
2874
class htmlSection(htmlChunk):
    """
    A <div> block fitting within an htmlPage; its name becomes both the
    div id and an h2 heading.  Inherits from htmlChunk.
    """
    def __init__(self, section_name, htmlElement=None, blank=False):
        # "blank" sections use a plain styled div instead of the
        # standard 'ppsection' class.
        if blank:
            attribs = {'style': '"color:#000000"', 'id': section_name}
        else:
            attribs = {'class': 'ppsection', 'id': section_name}
        htmlChunk.__init__(self, 'div', attrib=attribs, parent=htmlElement)
        self.h2(section_name)
2886
2887 2888 -class htmlCollapseSection(htmlChunk):
2889 """ 2890 Represents a block of html fitting within a htmlPage. Inherits from htmlChunk. 2891 """ 2892
2893 - def __init__(self,section_name,htmlElement=None,table_id=None,start_closed=True):