This module contains classes and functions for post-processing the output
of the Bayesian parameter estimation codes.
41from math
import cos,ceil,floor,sqrt,pi
as pi_constant
42from xml.dom
import minidom
43from operator
import itemgetter
48from .io
import read_samples
54from matplotlib
import pyplot
as plt,lines
as mpl_lines
55from scipy
import stats
56from scipy
import special
57from scipy
import signal
58from scipy.optimize
import newton
59from scipy
import interpolate
60from scipy
import integrate
63from itertools
import combinations
64from lalinference
import LALInferenceHDF5PosteriorSamplesDatasetName
as posterior_grp_name
68 import lalsimulation
as lalsim
70 print(
'Cannot import lalsimulation SWIG bindings')
74 from .imrtgr.nrutils
import bbh_average_fits_precessing
76 print(
'Cannot import lalinference.imrtgr.nrutils. Will suppress final parameter and peak luminosity calculations.')
81 hostname_short=socket.gethostbyaddr(socket.gethostname())[0].split(
'.',1)[1]
83 hostname_short=
'Unknown'
84if hostname_short==
'ligo.caltech.edu' or hostname_short==
'cluster.ldas.cit':
85 matplotlib.rcParams.update(
86 {
'mathtext.fontset' :
"custom",
87 'mathtext.fallback' :
'cm'
90from xml.etree.cElementTree
import Element, SubElement, tostring, XMLParser
94from lal
import LIGOTimeGPS, TimeDelayFromEarthCenter
95from .
import git_version
97__author__=
"Ben Aylott <benjamin.aylott@ligo.org>, Ben Farr <bfarr@u.northwestern.edu>, Will M. Farr <will.farr@ligo.org>, John Veitch <john.veitch@ligo.org>, Vivien Raymond <vivien.raymond@ligo.org>"
98__version__=
"git id %s"%git_version.id
99__date__= git_version.date
102 return siminspiral.geocent_end_time + 1e-9*siminspiral.geocent_end_time_ns
105 """Workaround for missing astropy.table.Table.replace_column method,
106 which was added in Astropy 1.1.
108 FIXME: remove this function when LALSuite depends on Astropy >= 1.1.
"""
109 index = table.colnames.index(old)
110 table.remove_column(old)
111 table.add_column(astropy.table.Column(new, name=old), index=index)
114 """Workaround for missing astropy.table.Table.as_array method,
115 which was added in Astropy 1.0.
117 FIXME: remove this function when LALSuite depends on Astropy >= 1.0.
"""
119 return table.as_array()
# Column names holding log-valued quantities (likelihoods, priors,
# evidences); downstream code treats these specially (e.g. no exp scaling).
logParams = ('logl loglh1 loglh2 logll1 loglv1 '
             'deltalogl deltaloglh1 deltalogll1 deltaloglv1 '
             'logw logprior logpost nulllogl '
             'chain_log_evidence chain_delta_log_evidence '
             'chain_log_noise_evidence chain_log_bayes_factor').split()
# Inter-detector relative phase parameters, one per ordered detector pair.
relativePhaseParams = ['{}{}_relative_phase'.format(d1, d2)
                       for d1, d2 in combinations(['h1', 'l1', 'v1'], 2)]
# Signal-to-noise-ratio related parameters: network quantities first, then
# the per-detector optimal SNR and complex-SNR amplitude/argument columns,
# then the relative phases defined above.
snrParams = (['snr', 'optimal_snr', 'matched_filter_snr', 'coherence']
             + [det + suffix
                for suffix in ('_optimal_snr', '_cplx_snr_amp',
                               '_cplx_snr_arg')
                for det in ('h1', 'l1', 'v1')]
             + relativePhaseParams)
# Calibration-error parameters: amplitude and phase, one per detector.
calAmpParams = ['calamp_' + det for det in ('h1', 'l1', 'v1')]
calPhaseParams = ['calpha_' + det for det in ('h1', 'l1', 'v1')]
calParams = calAmpParams + calPhaseParams
# Component-mass and mass-combination parameter names (including aliases
# for chirp mass and mass ratio, and final-mass variants).
massParams = ('m1 m2 chirpmass mchirp mc eta q massratio asym_massratio '
              'mtotal mf mf_evol mf_nonevol').split()
# Spin parameters, grouped by precessing-system angles, aligned-spin
# components, and effective-spin combinations.
spinParamsPrec = ('a1 a2 phi1 theta1 phi2 theta2 costilt1 costilt2 '
                  'costheta_jn cosbeta tilt1 tilt1_isco tilt2 tilt2_isco '
                  'phi_jl theta_jn phi12 phi12_isco '
                  'af af_evol af_nonevol afz afz_evol afz_nonevol').split()
spinParamsAli = 'spin1 spin2 a1z a2z'.split()
spinParamsEff = 'chi effectivespin chi_eff chi_tot chi_p'.split()
spinParams = spinParamsPrec + spinParamsEff + spinParamsAli
# Source-frame (redshift-corrected) parameters, plus the variants computed
# at the maximum-likelihood distance ("maxldist").
cosmoParam = ('m1_source m2_source mtotal_source mc_source redshift '
              'mf_source mf_source_evol mf_source_nonevol '
              'm1_source_maxldist m2_source_maxldist '
              'mtotal_source_maxldist mc_source_maxldist redshift_maxldist '
              'mf_source_maxldist mf_source_maxldist_evol '
              'mf_source_maxldist_nonevol').split()
# Parameterized post-Einsteinian (ppE) deviation parameters.
ppEParams = ('ppEalpha ppElowera ppEupperA ppEbeta ppElowerb ppEupperB '
             'alphaPPE aPPE betaPPE bPPE').split()
# TIGER post-Newtonian deviation coefficients: inspiral phase (dchi*),
# alternative parameterizations (dxi*, dalpha*, dbeta*, dsigma*), dipole
# term, pre-Newtonian terms, and merger-ringdown coefficients.
tigerParams = (
    ['dchi%d' % n for n in range(8)]
    + ['dchi%dl' % n for n in (5, 6)]
    + ['dxi%d' % n for n in range(1, 7)]
    + ['dalpha%d' % n for n in range(1, 6)]
    + ['dbeta%d' % n for n in range(1, 4)]
    + ['dsigma%d' % n for n in range(1, 5)]
    + ['dipolecoeff']
    + ['dchiminus%d' % n for n in (1, 2)]
    + ['dchiMinus%d' % n for n in (1, 2)]
    + 'db1 db2 db3 db4 dc1 dc2 dc4 dcl'.split()
    + ['damp21', 'damp33'])
# Quasi-normal-mode deviation parameters: frequency/damping-time shifts
# per (l, m, n) mode, listed as (domega, dtau) pairs.
qnmtestParams = ['d%s%s' % (kind, mode)
                 for mode in ('220', '210', '330', '440', '550')
                 for kind in ('omega', 'tau')]
# Brans-Dicke scalar-tensor theory parameters.
bransDickeParams = ['omegaBD', 'ScalarCharge1', 'ScalarCharge2']
# Massive-graviton Compton wavelength.
massiveGravitonParams = ['lambdaG']
# Lorentz-invariance-violation dispersion parameters (and log10 variants).
lorentzInvarianceViolationParams = ['log10lambda_a', 'lambda_a',
                                    'log10lambda_eff', 'lambda_eff',
                                    'log10livamp', 'liv_amp']
# Tidal-deformability parameters and their common reparameterizations.
tidalParams = ['lambda1', 'lambda2', 'lam_tilde', 'dlam_tilde',
               'lambdat', 'dlambdat', 'lambdas', 'bluni']
# Piecewise-polytrope equation-of-state parameters.
fourPiecePolyParams = ['logp1'] + ['gamma%d' % n for n in (1, 2, 3)]
# Spectral equation-of-state decomposition coefficients.
spectralParams = ['sdgamma%d' % n for n in range(4)]
# Radiated-energy and peak-luminosity parameters, each with plain,
# spin-evolved and non-evolved variants (plus maxldist radiated energy).
energyParams = [base + suffix
                for base in ('e_rad', 'l_peak', 'e_rad_maxldist')
                for suffix in ('', '_evol', '_nonevol')]
# Spin-induced quadrupole-moment deviation parameters.
# BUGFIX: the original literal listed 'dquadmona' twice; the second entry
# should be the symmetric combination 'dquadmons' (the plot-label table in
# this module defines labels for both 'dquadmons' and 'dquadmona').
spininducedquadParams = ['dquadmon1', 'dquadmon2', 'dquadmons', 'dquadmona']
156 ppEParams + tigerParams + bransDickeParams + massiveGravitonParams
157 + tidalParams + fourPiecePolyParams + spectralParams + energyParams
158 + lorentzInvarianceViolationParams + spininducedquadParams + qnmtestParams
# Extrinsic-parameter name groups (distance, inclination, polarization,
# sky position, phase, time) plus sampler statistics and calibration
# columns, and the default credible levels used for interval reporting.
distParams = 'distance distMPC dist distance_maxl'.split()
incParams = 'iota inclination cosiota'.split()
polParams = 'psi polarisation polarization'.split()
skyParams = 'ra rightascension declination dec'.split()
phaseParams = 'phase phi0 phase_maxl'.split()
timeParams = 'time time_mean'.split()
endTimeParams = 'l1_end_time h1_end_time v1_end_time'.split()
statsParams = ('logprior logl deltalogl deltaloglh1 deltalogll1 '
               'deltaloglv1 deltaloglh2 deltaloglg1').split()
calibParams = ('calpha_l1 calpha_h1 calpha_v1 '
               'calamp_l1 calamp_h1 calamp_v1').split()
# Default credible levels.
confidenceLevels = [0.67, 0.9, 0.95, 0.99]
# Default 1-D histogram bin widths, per parameter name, for the greedy
# binning credible-interval code.
# NOTE(review): 'thatas' looks like a typo for 'thetas' — kept as-is to
# preserve behavior; confirm against whatever reads these keys.
greedyBinSizes = {
    'mc': 0.025, 'm1': 0.1, 'm2': 0.1, 'mass1': 0.1, 'mass2': 0.1,
    'mtotal': 0.1, 'mc_source': 0.025, 'm1_source': 0.1, 'm2_source': 0.1,
    'mtotal_source': 0.1, 'mc_source_maxldist': 0.025,
    'm1_source_maxldist': 0.1, 'm2_source_maxldist': 0.1,
    'mtotal_source_maxldist': 0.1, 'eta': 0.001, 'q': 0.01,
    'asym_massratio': 0.01, 'iota': 0.01, 'cosiota': 0.02,
    'time': 1e-4, 'time_mean': 1e-4,
    'distance': 1.0, 'dist': 1.0, 'distance_maxl': 1.0,
    'redshift': 0.01, 'redshift_maxldist': 0.01,
    'mchirp': 0.025, 'chirpmass': 0.025,
    'spin1': 0.04, 'spin2': 0.04, 'a1z': 0.04, 'a2z': 0.04,
    'a1': 0.02, 'a2': 0.02,
    'phi1': 0.05, 'phi2': 0.05, 'theta1': 0.05, 'theta2': 0.05,
    'ra': 0.05, 'dec': 0.05,
    'chi': 0.05, 'chi_eff': 0.05, 'chi_tot': 0.05, 'chi_p': 0.05,
    'costilt1': 0.02, 'costilt2': 0.02,
    'thatas': 0.05,
    # BUGFIX: the original literal listed 'costheta_jn' twice (0.02 here
    # and 0.1 further down); only the later 0.1 ever took effect, so the
    # dead 0.02 entry was removed.
    'beta': 0.05, 'omega': 0.05, 'cosbeta': 0.02,
    'ppealpha': 1.0, 'ppebeta': 1.0,
    'ppelowera': 0.01, 'ppelowerb': 0.01,
    'ppeuppera': 0.01, 'ppeupperb': 0.01,
    'polarisation': 0.04, 'rightascension': 0.05, 'declination': 0.05,
    'massratio': 0.001, 'inclination': 0.01, 'phase': 0.05,
    'tilt1': 0.05, 'tilt2': 0.05,
    'phi_jl': 0.05, 'theta_jn': 0.05, 'phi12': 0.05,
    'flow': 1.0, 'phase_maxl': 0.05,
    'calamp_l1': 0.01, 'calamp_h1': 0.01, 'calamp_v1': 0.01,
    'calpha_h1': 0.01, 'calpha_l1': 0.01, 'calpha_v1': 0.01,
    'logdistance': 0.1, 'psi': 0.1, 'costheta_jn': 0.1,
    'mf': 0.1, 'mf_evol': 0.1, 'mf_nonevol': 0.1,
    'mf_source': 0.1, 'mf_source_evol': 0.1, 'mf_source_nonevol': 0.1,
    'mf_source_maxldist': 0.1, 'mf_source_maxldist_evol': 0.1,
    'mf_source_maxldist_nonevol': 0.1,
    'af': 0.02, 'af_evol': 0.02, 'af_nonevol': 0.02,
    'afz': 0.02, 'afz_evol': 0.01, 'afz_nonevol': 0.01,
    'e_rad': 0.1, 'e_rad_evol': 0.1, 'e_rad_nonevol': 0.1,
    'e_rad_maxldist': 0.1, 'e_rad_maxldist_evol': 0.1,
    'e_rad_maxldist_nonevol': 0.1,
    'l_peak': 0.1, 'l_peak_evol': 0.1, 'l_peak_nonevol': 0.1}
179 greedyBinSizes[s]=0.05
# Fill in bin widths for parameters derived from the base table above:
# per-detector end times and inter-detector delays inherit the geocenter
# 'time' width; the remaining families get family-wide constants.
greedyBinSizes.update(
    {derived: greedyBinSizes['time']
     for derived in ('h1_end_time', 'l1_end_time', 'v1_end_time',
                     'h1l1_delay', 'l1v1_delay', 'h1v1_delay')})
greedyBinSizes.update(dict.fromkeys(relativePhaseParams, 0.05))
greedyBinSizes.update(dict.fromkeys(tigerParams + bransDickeParams
                                    + massiveGravitonParams
                                    + lorentzInvarianceViolationParams
                                    + qnmtestParams, 0.01))
greedyBinSizes.update(dict.fromkeys(tidalParams + fourPiecePolyParams
                                    + spectralParams
                                    + spininducedquadParams, 2.5))
greedyBinSizes.update(dict.fromkeys(statsParams, 0.1))
199 location = lal.cached_detector_by_prefix[ifo_prefix].location
202 t_gps = LIGOTimeGPS(inj.geocent_end_time, inj.geocent_end_time_ns)
203 t0 = inj.geocent_end_time + 1e-9*inj.geocent_end_time_ns
204 return t0 + TimeDelayFromEarthCenter(location,ra,dec,t_gps)
# Default matplotlib line styles and color cycle for multi-run overlays.
__default_line_styles = 'solid dashed dashdot dotted'.split()
__default_color_lst = list('rbygcm')
211__default_css_string=
"""
214font-family:"Trebuchet MS", Arial, Helvetica, sans-serif;
241font-family:"Trebuchet MS", Arial, Helvetica, sans-serif;
243border-collapse:collapse;
248border:1px solid #B5C1CF;
249padding:3px 7px 2px 7px;
257background-color:#B3CEEF;
282border-bottom-style:double;
286__default_javascript_string='''
288function toggle_visibility(tbid,lnkid)
291 if(document.all){document.getElementById(tbid).style.display = document.getElementById(tbid).style.display == 'block' ?
'none' :
'block';}
293 else{document.getElementById(tbid).style.display = document.getElementById(tbid).style.display ==
'table' ?
'none' :
'table';}
295 document.getElementById(lnkid).value = document.getElementById(lnkid).value ==
'[-] Collapse' ?
'[+] Expand' :
'[-] Collapse';
303#===============================================================================
304# Function to return the correct prior distribution for selected parameters
305#===============================================================================
316 'mtotal_source':
None,
319 'm1_source_maxldist':
None,
320 'm2_source_maxldist':
None,
321 'mtotal_source_maxldist':
None,
322 'mc_source_maxldist':
None,
323 'redshift_maxldist':
None,
328 'mf_source_evol':
None,
329 'mf_source_nonevol':
None,
330 'mf_source_maxldist':
None,
331 'mf_source_maxldist_evol':
None,
332 'mf_source_maxldist_nonevol':
None,
341 'e_rad_nonevol':
None,
342 'e_rad_maxldist':
None,
343 'e_rad_maxldist_evol':
None,
344 'e_rad_maxldist_nonevol':
None,
347 'l_peak_nonevol':
None,
365 'costilt1':
'uniform',
366 'costilt2':
'uniform',
370 'time_mean':
'uniform',
372 'distance_maxl':
'x**2',
378 'costheta_jn':
'uniform',
393 'lambda1' :
'uniform',
394 'lambda2':
'uniform',
407 'calamp_h1' :
'uniform',
408 'calamp_l1' :
'uniform',
409 'calpha_h1' :
'uniform',
410 'calpha_l1' :
'uniform',
411 'polar_eccentricity':
'uniform',
412 'polar_angle':
'uniform',
416 return distributions(name)
425 A lookup table for plot labels.
427 m1_names = ['mass1',
'm1']
428 m2_names = [
'mass2',
'm2']
429 mc_names = [
'mc',
'mchirp',
'chirpmass']
430 eta_names = [
'eta',
'massratio',
'sym_massratio']
431 q_names = [
'q',
'asym_massratio']
432 iota_names = [
'iota',
'incl',
'inclination']
433 dist_names = [
'dist',
'distance']
434 ra_names = [
'rightascension',
'ra']
435 dec_names = [
'declination',
'dec']
436 phase_names = [
'phi_orb',
'phi',
'phase',
'phi0']
437 gr_test_names = [
'dchiMinus2',
'dchiMinus1'] + [
'dchi%d'%i
for i
in range(8)]+[
'dchil%d'%i
for i
in [5,6]]+[
'dxi%d'%(i+1)
for i
in range(6)]+[
'dalpha%d'%(i+1)
for i
in range(5)]+[
'dbeta%d'%(i+1)
for i
in range(3)]+[
'dsigma%d'%(i+1)
for i
in range(4)] + [
'dipolecoeff'] + [
'db1',
'db2',
'db3',
'db4',
'dc1',
'dc2',
'dc4',
'dcl']+[
'damp21',
'damp33']
440 'm1':
r'$m_1\,(\mathrm{M}_\odot)$',
441 'm2':
r'$m_2\,(\mathrm{M}_\odot)$',
442 'mc':
r'$\mathcal{M}\,(\mathrm{M}_\odot)$',
445 'mtotal':
r'$M_\mathrm{total}\,(\mathrm{M}_\odot)$',
446 'm1_source':
r'$m_{1}^\mathrm{source}\,(\mathrm{M}_\odot)$',
447 'm2_source':
r'$m_{2}^\mathrm{source}\,(\mathrm{M}_\odot)$',
448 'mtotal_source':
r'$M_\mathrm{total}^\mathrm{source}\,(\mathrm{M}_\odot)$',
449 'mc_source':
r'$\mathcal{M}^\mathrm{source}\,(\mathrm{M}_\odot)$',
451 'm1_source_maxldist':
r'$m_{1}^\mathrm{source - maxLdist}\,(\mathrm{M}_\odot)$',
452 'm2_source_maxldist':
r'$m_{2}^\mathrm{source - maxLdist}\,(\mathrm{M}_\odot)$',
453 'mtotal_source_maxldist':
r'$M_\mathrm{total}^\mathrm{source - maxLdist}\,(\mathrm{M}_\odot)$',
454 'mc_source_maxldist':
r'$\mathcal{M}^\mathrm{source - maxLdist}\,(\mathrm{M}_\odot)$',
455 'redshift_maxldist':
r'$z^\mathrm{maxLdist}$',
456 'mf':
r'$M_\mathrm{final}\,(\mathrm{M}_\odot)$',
457 'mf_evol':
r'$M_\mathrm{final}^\mathrm{evol}\,(\mathrm{M}_\odot)$',
458 'mf_nonevol':
r'$M_\mathrm{final}^\mathrm{non-evol}\,(\mathrm{M}_\odot)$',
459 'mf_source':
r'$M_\mathrm{final}^\mathrm{source}\,(\mathrm{M}_\odot)$',
460 'mf_source_evol':
r'$M_\mathrm{final}^\mathrm{source, evol}\,(\mathrm{M}_\odot)$',
461 'mf_source_nonevol':
r'$M_\mathrm{final}^\mathrm{source, non-evol}\,(\mathrm{M}_\odot)$',
462 'mf_source_maxldist':
r'$M_\mathrm{final}^\mathrm{source - maxLdist}\,(\mathrm{M}_\odot)$',
463 'mf_source_maxldist_evol':
r'$M_\mathrm{final}^\mathrm{source, evol - maxLdist}\,(\mathrm{M}_\odot)$',
464 'mf_source_maxldist_nonevol':
r'$M_\mathrm{final}^\mathrm{source, non-evol - maxLdist}\,(\mathrm{M}_\odot)$',
465 'af':
r'$a_\mathrm{final}$',
466 'af_evol':
r'$a_\mathrm{final}^\mathrm{evol}$',
467 'af_nonevol':
r'$a_\mathrm{final}^\mathrm{non-evol}$',
468 'afz':
r'$a_{\mathrm{final}, z}$',
469 'afz_evol':
r'$a_{\mathrm{final}, z}^\mathrm{evol}$',
470 'afz_nonevol':
r'$a_{\mathrm{final}, z}^\mathrm{non-evol}$',
471 'e_rad':
r'$E_\mathrm{rad}\,(\mathrm{M}_\odot)$',
472 'e_rad_evol':
r'$E_\mathrm{rad}^\mathrm{evol}\,(\mathrm{M}_\odot)$',
473 'e_rad_nonevol':
r'$E_\mathrm{rad}^\mathrm{non-evol}\,(\mathrm{M}_\odot)$',
474 'e_rad_maxldist':
r'$E_\mathrm{rad}^\mathrm{maxLdist}\,(\mathrm{M}_\odot)$',
475 'e_rad_maxldist_evol':
r'$E_\mathrm{rad}^\mathrm{evol - maxLdist}\,(\mathrm{M}_\odot)$',
476 'e_rad_maxldist_nonevol':
r'$E_\mathrm{rad}^\mathrm{non-evol - maxLdist}\,(\mathrm{M}_\odot)$',
477 'l_peak':
r'$L_\mathrm{peak}\,(10^{56}\,\mathrm{ergs}\,\mathrm{s}^{-1})$',
478 'l_peak_evol':
r'$L_\mathrm{peak}^\mathrm{evol}\,(10^{56}\,\mathrm{ergs}\,\mathrm{s}^{-1})$',
479 'l_peak_nonevol':
r'$L_\mathrm{peak}^\mathrm{non-evol}\,(10^{56}\,\mathrm{ergs}\,\mathrm{s}^{-1})$',
486 'theta1':
r'$\theta_1\,(\mathrm{rad})$',
487 'theta2':
r'$\theta_2\,(\mathrm{rad})$',
488 'phi1':
r'$\phi_1\,(\mathrm{rad})$',
489 'phi2':
r'$\phi_2\,(\mathrm{rad})$',
490 'chi_eff':
r'$\chi_\mathrm{eff}$',
491 'chi_tot':
r'$\chi_\mathrm{total}$',
492 'chi_p':
r'$\chi_\mathrm{P}$',
493 'tilt1':
r'$t_1\,(\mathrm{rad})$',
494 'tilt2':
r'$t_2\,(\mathrm{rad})$',
495 'tilt1_isco':
r'$t_1^\mathrm{ISCO}\,(\mathrm{rad})$',
496 'tilt2_isco':
r'$t_2^\mathrm{ISCO}\,(\mathrm{rad})$',
497 'costilt1':
r'$\mathrm{cos}(t_1)$',
498 'costilt2':
r'$\mathrm{cos}(t_2)$',
499 'iota':
r'$\iota\,(\mathrm{rad})$',
500 'cosiota':
r'$\mathrm{cos}(\iota)$',
501 'time':
r'$t_\mathrm{c}\,(\mathrm{s})$',
502 'time_mean':
r'$<t>\,(\mathrm{s})$',
503 'dist':
r'$d_\mathrm{L}\,(\mathrm{Mpc})$',
504 'distance_maxl':
r'$d_\mathrm{L}^\mathrm{maxL}\,(\mathrm{Mpc})$',
507 'phase':
r'$\phi\,(\mathrm{rad})$',
508 'phase_maxl':
r'$\phi^\mathrm{maxL}\,(\mathrm{rad})$',
509 'psi':
r'$\psi\,(\mathrm{rad})$',
510 'theta_jn':
r'$\theta_\mathrm{JN}\,(\mathrm{rad})$',
511 'costheta_jn':
r'$\mathrm{cos}(\theta_\mathrm{JN})$',
512 'beta':
r'$\beta\,(\mathrm{rad})$',
513 'cosbeta':
r'$\mathrm{cos}(\beta)$',
514 'phi_jl':
r'$\phi_\mathrm{JL}\,(\mathrm{rad})$',
515 'phi12':
r'$\phi_\mathrm{12}\,(\mathrm{rad})$',
516 'phi12_isco':
r'$\phi_\mathrm{12}^\mathrm{ISCO}\,(\mathrm{rad})$',
517 'logl':
r'$\mathrm{log}(\mathcal{L})$',
518 'h1_end_time':
r'$t_\mathrm{H}$',
519 'l1_end_time':
r'$t_\mathrm{L}$',
520 'v1_end_time':
r'$t_\mathrm{V}$',
521 'h1l1_delay':
r'$\Delta t_\mathrm{HL}$',
522 'h1v1_delay':
r'$\Delta t_\mathrm{HV}$',
523 'l1v1_delay':
r'$\Delta t_\mathrm{LV}$',
524 'lambdat' :
r'$\tilde{\Lambda}$',
525 'dlambdat':
r'$\delta \tilde{\Lambda}$',
526 'lambda1' :
r'$\lambda_1$',
527 'lambda2':
r'$\lambda_2$',
528 'lam_tilde' :
r'$\tilde{\Lambda}$',
529 'dlam_tilde':
r'$\delta \tilde{\Lambda}$',
530 'logp1':
r'$\log(p_1)$',
531 'gamma1':
r'$\Gamma_1$',
532 'gamma2':
r'$\Gamma_2$',
533 'gamma3':
r'$\Gamma_3$',
534 'sdgamma0' :
r'$\gamma_{0}$',
535 'sdgamma1' :
r'$\gamma_{1}$',
536 'sdgamma2' :
r'$\gamma_{2}$',
537 'sdgamma3' :
r'$\gamma_{3}$',
538 'calamp_h1' :
r'$\delta A_{H1}$',
539 'calamp_l1' :
r'$\delta A_{L1}$',
540 'calpha_h1' :
r'$\delta \phi_{H1}$',
541 'calpha_l1' :
r'$\delta \phi_{L1}$',
542 'polar_eccentricity':
r'$\epsilon_{polar}$',
543 'polar_angle':
r'$\alpha_{polar}$',
544 'alpha':
r'$\alpha_{polar}$',
545 'dchiminus1':
r'$d\chi_{-1}$',
546 'dchiminus2':
r'$d\chi_{-2}$',
547 'dchi0':
r'$d\chi_0$',
548 'dchi1':
r'$d\chi_1$',
549 'dchi2':
r'$d\chi_2$',
550 'dchi3':
r'$d\chi_3$',
551 'dchi3S':
r'$d\chi_{3S}$',
552 'dchi3NS':
r'$d\chi_{3NS}$',
553 'dchi4':
r'$d\chi_4$',
554 'dchi4S':
r'$d\chi_{4S}$',
555 'dchi4NS':
r'$d\chi_{4NS}$',
556 'dchi5':
r'$d\chi_5$',
557 'dchi5S':
r'$d\chi_{5S}$',
558 'dchi5NS':
r'$d\chi_{5NS}$',
559 'dchi5l':
r'$d\chi_{5l}$',
560 'dchi5lS':
r'$d\chi_{5lS}$',
561 'dchi5lNS':
r'$d\chi_{5lNS}$',
562 'dchi6':
r'$d\chi_6$',
563 'dchi6S':
r'$d\chi_{6S}$',
564 'dchi6NS':
r'$d\chi_{6NS}$',
565 'dchi6l':
r'$d\chi_{6l}$',
566 'dchi7':
r'$d\chi_7$',
567 'dchi7S':
r'$d\chi_{7S}$',
568 'dchi7NS':
r'$d\chi_{7NS}$',
575 'dalpha1':
r'$d\alpha_1$',
576 'dalpha2':
r'$d\alpha_2$',
577 'dalpha3':
r'$d\alpha_3$',
578 'dalpha4':
r'$d\alpha_4$',
579 'dalpha5':
r'$d\alpha_5$',
580 'dbeta1':
r'$d\beta_1$',
581 'dbeta2':
r'$d\beta_2$',
582 'dbeta3':
r'$d\beta_3$',
583 'dsigma1':
r'$d\sigma_1$',
584 'dsigma2':
r'$d\sigma_2$',
585 'dsigma3':
r'$d\sigma_3$',
586 'dsigma4':
r'$d\sigma_4$',
587 'dquadmon1':
r'$\delta\kappa_1$',
588 'dquadmon2':
r'$\delta\kappa_2$',
589 'dquadmons':
r'$\delta\kappa_s$',
590 'dquadmona':
r'$\delta\kappa_a$',
591 'domega220':
r'$d\omega_{220}$',
592 'dtau220':
r'$d\tau_{220}$',
593 'domega210':
r'$d\omega_{210}$',
594 'dtau210':
r'$d\tau_{210}$',
595 'domega330':
r'$d\omega_{330}$',
596 'dtau330':
r'$d\tau_{330}$',
597 'domega440':
r'$d\omega_{440}$',
598 'dtau440':
r'$d\tau_{440}$',
599 'domega550':
r'$d\omega_{550}$',
600 'dtau550':
r'$d\tau_{550}$',
601 'damp21':
r'$\delta A_{21}$',
602 'damp33':
r'$\delta A_{33}$',
603 'optimal_snr':
r'$\rho^{opt}$',
604 'h1_optimal_snr':
r'$\rho^{opt}_{H1}$',
605 'l1_optimal_snr':
r'$\rho^{opt}_{L1}$',
606 'v1_optimal_snr':
r'$\rho^{opt}_{V1}$',
607 'matched_filter_snr':
r'$\rho^{MF}$',
608 'lambdas':
r'$\Lambda_S$',
609 'bluni':
r'$BL_{uniform}$',
610 'log10lambda_a':
r'$\log\lambda_{\mathbb{A}} [\mathrm{m}]$',
611 'log10lambda_eff':
r'$\log\lambda_{eff} [\mathrm{m}]$',
612 'lambda_eff':
r'$\lambda_{eff} [\mathrm{m}]$',
613 'lambda_a':
r'$\lambda_{\mathbb{A}} [\mathrm{m}]$',
614 'liv_amp':
r'$\mathbb{A} [\mathrm{{eV}^{2-\alpha}}]$' ,
615 'log10livamp':
r'$\log \mathbb{A}[\mathrm{{eV}^{2-\alpha}}]$',
616 'dchikappaS':
r'$d\chi_{\kappa_{S}}$',
617 'dchikappaA':
r'$d\chi_{\kappa_{A}}$',
618 'dchiMinus1':
r'$d\chi_{-1}$',
619 'dchiMinus2':
r'$d\chi_{-2}$',
620 'db1':
r'$\delta b_1$',
621 'db2':
r'$\delta b_2$',
622 'db3':
r'$\delta b_3$',
623 'db4':
r'$\delta b_4$',
624 'dc1':
r'$\delta c_1$',
625 'dc2':
r'$\delta c_2$',
626 'dc4':
r'$\delta c_4$',
627 'dcl':
r'$\delta c_l$',
632 if param
in m1_names:
634 elif param
in m2_names:
636 elif param
in mc_names:
638 elif param
in eta_names:
640 elif param
in q_names:
642 elif param
in iota_names:
644 elif param
in dist_names:
646 elif param
in ra_names:
648 elif param
in dec_names:
650 elif param
in phase_names:
654 label = labels[param]
667 A data structure representing one parameter in a chain of posterior samples.
668 The Posterior
class generates instances of this class for pivoting onto
a given
669 parameter (the Posterior
class is per-Sampler oriented whereas this
class represents
670 the same one parameter
in successive samples
in the chain).
672 def __init__(self,name,posterior_samples,injected_value=None,injFref=None,trigger_values=None,prior=None):
674 Create an instance of PosteriorOneDPDF based on a table of posterior_samples.
676 @param name: A literal string name
for the parameter.
677 @param posterior_samples: A 1D array of the samples.
678 @param injected_value: The injected
or real value of the parameter.
679 @param injFref: reference frequency
for injection
680 @param trigger_values: The trigger values of the parameter (dictionary w/ IFOs
as keys).
681 @param prior: The prior value corresponding to each sample.
695 Container method. Defined as number of samples.
701 Container method . Returns posterior containing sample idx (allows slicing).
708 Return the string literal name of the parameter.
716 Return the arithmetic mean for the marginal PDF on the parameter.
724 Return the median value for the marginal PDF on the parameter.
732 Return the standard deviation of the marginal PDF on the parameter.
737 if not np.isfinite(stdev):
739 except OverflowError:
747 Return the 'standard accuracy statistic' (stacc) of the marginal
748 posterior of the parameter.
750 stacc
is a standard deviant incorporating information about the
751 accuracy of the waveform recovery. Defined
as the mean of the sum
752 of the squared differences between the points
in the PDF
753 (x_i - sampled according to the posterior)
and the true value
754 (\f$x_{true}\f$). So
for a marginalized one-dimensional PDF:
755 \f$stacc = \sqrt{\frac{1}{N}\sum_{i=1}^N (x_i-x_{\rm true})2}\f$
766 Return the injected value set at construction . If no value was set
775 Return the trigger values set at construction. If no value was set
784 Set the injected/real value of the parameter.
786 @param new_injval: The injected/real value to set.
793 Set the trigger values of the parameter.
795 @param new_trigvals: Dictionary containing trigger values
with IFO keys.
803 Return a 1D numpy.array of the samples.
810 Remove samples from posterior, analagous to numpy.delete but opperates
in place.
812 @param samples: A list containing the indexes of the samples to be removed.
819 Return a SciPy gaussian_kde (representing a Gaussian KDE) of the samples.
822 from numpy
import seterr
as np_seterr
823 from scipy
import seterr
as sp_seterr
825 np_seterr(under=
'ignore')
826 sp_seterr(under=
'ignore')
830 exfile=open(
'exception.out',
'w')
839 """Returns the KL divergence between the prior and the posterior.
840 It measures the relative information content in nats. The prior
is evaluated
841 at run time. It defaults to
None. If
None is passed, it just returns the information content
846 return np.array([1./(np.max(x)-np.min(x))
for _
in x])
848 posterior, dx = np.histogram(self.
samples,bins=36,density=
True)
849 from scipy.stats
import entropy
854 elif prior==
'uniform':
855 prior+=
'(self.samples)'
857 prior.replace(
'x',
'self.samples')
858 elif not(prior.startswith(
'np.')):
860 prior+=
'(self.samples)'
865 prior_dist = eval(prior)
869 return entropy(posterior, qk=prior_dist)
873 Evaluate probability intervals.
875 @param intervals: A list of the probability intervals [0-1]
878 samples_temp=np.sort(np.squeeze(self.samples))
880 for interval
in intervals:
883 N=np.size(samples_temp)
885 lower_idx=
int(floor((N/2.0)*(1-interval)))
889 upper_idx=N-
int(floor((N/2.0)*(1-interval)))
893 list_of_ci.append((float(samples_temp[lower_idx]),float(samples_temp[upper_idx])))
895 list_of_ci.append((
None,
None))
901 Data structure for a table of posterior samples .
903 def __init__(self,commonResultsFormatData,SimInspiralTableEntry=None,inj_spin_frame='OrbitalL', injFref=100,SnglInspiralList=None,name=None,description=None):
907 @param commonResultsFormatData: A 2D array containing the posterior
908 samples
and related data. The samples chains form the columns.
909 @param SimInspiralTableEntry: A SimInspiralTable row containing the injected values.
910 @param SnglInspiralList: A list of SnglInspiral objects containing the triggers.
911 @param inj_spin_frame: spin frame
912 @param injFref: reference frequency
913 @param name: optional name
914 @param description: optional description
917 common_output_table_header,common_output_table_raw =commonResultsFormatData
923 self._loglaliases=['deltalogl',
'posterior',
'logl',
'logL',
'likelihood']
924 self.
_logpaliases=[
'logp',
'logP',
'prior',
'logprior',
'Prior',
'logPrior']
926 common_output_table_header=[i.lower()
for i
in common_output_table_header]
930 'mchirp':
lambda inj:inj.mchirp,
931 'chirpmass':
lambda inj:inj.mchirp,
932 'mc':
lambda inj:inj.mchirp,
933 'mass1':
lambda inj:inj.mass1,
934 'm1':
lambda inj:inj.mass1,
935 'mass2':
lambda inj:inj.mass2,
936 'm2':
lambda inj:inj.mass2,
937 'mtotal':
lambda inj:float(inj.mass1)+float(inj.mass2),
938 'eta':
lambda inj:inj.eta,
940 'asym_massratio':self.
_inj_q,
941 'massratio':
lambda inj:inj.eta,
942 'sym_massratio':
lambda inj:inj.eta,
943 'time':
lambda inj:float(
get_end(inj)),
944 'time_mean':
lambda inj:float(
get_end(inj)),
945 'end_time':
lambda inj:float(
get_end(inj)),
946 'phi0':
lambda inj:inj.phi0,
947 'phi_orb':
lambda inj: inj.coa_phase,
948 'phase':
lambda inj: inj.coa_phase,
949 'dist':
lambda inj:inj.distance,
950 'distance':
lambda inj:inj.distance,
955 'dec':
lambda inj:inj.latitude,
956 'declination':
lambda inj:inj.latitude,
957 'lat':
lambda inj:inj.latitude,
958 'latitude':
lambda inj:inj.latitude,
959 'psi':
lambda inj: np.mod(inj.polarization, np.pi),
961 'polarisation':
lambda inj:inj.polarization,
962 'polarization':
lambda inj:inj.polarization,
966 'lal_amporder':
lambda inj:inj.amp_order}
972 for one_d_posterior_samples,param_name
in zip(np.hsplit(common_output_table_raw,common_output_table_raw.shape[1]),common_output_table_header):
976 if 'mchirp' in common_output_table_header
and 'eta' in common_output_table_header \
977 and (
not 'm1' in common_output_table_header)
and (
not 'm2' in common_output_table_header):
979 print(
'Inferring m1 and m2 from mchirp and eta')
984 print(
'Unable to deduce m1 and m2 from input columns')
991 if loglalias
in common_output_table_header:
995 print(
"No '%s' column in input table!"%loglalias)
1000 raise RuntimeError(
"No likelihood/posterior values found!")
1004 if logpalias
in common_output_table_header:
1008 print(
"No '%s' column in input table!"%logpalias)
1010 if not 'log' in logpalias:
1013 if name
is not None:
1016 if description
is not None:
1023 Add some useful derived parameters (such as tilt angles, time delays, etc)
in the Posterior object
1028 if 'mc' in pos.names:
1030 elif 'chirpmass' in pos.names:
1031 mchirp_name =
'chirpmass'
1033 mchirp_name =
'mchirp'
1035 if 'asym_massratio' in pos.names:
1036 q_name =
'asym_massratio'
1040 if 'sym_massratio' in pos.names:
1041 eta_name=
'sym_massratio'
1042 elif 'massratio' in pos.names:
1043 eta_name=
'massratio'
1047 if 'mass1' in pos.names
and 'mass2' in pos.names:
1048 pos.append_mapping((
'm1',
'm2'),
lambda x,y:(x,y), (
'mass1',
'mass2'))
1050 if (mchirp_name
in pos.names
and eta_name
in pos.names)
and \
1051 (
'mass1' not in pos.names
or 'm1' not in pos.names)
and \
1052 (
'mass2' not in pos.names
or 'm2' not in pos.names):
1054 pos.append_mapping((
'm1',
'm2'),mc2ms,(mchirp_name,eta_name))
1056 if (mchirp_name
in pos.names
and q_name
in pos.names)
and \
1057 (
'mass1' not in pos.names
or 'm1' not in pos.names)
and \
1058 (
'mass2' not in pos.names
or 'm2' not in pos.names):
1060 pos.append_mapping((
'm1',
'm2'),q2ms,(mchirp_name,q_name))
1061 pos.append_mapping(
'eta',q2eta,q_name)
1063 if (
'm1' in pos.names
and 'm2' in pos.names
and not 'mtotal' in pos.names ):
1064 pos.append_mapping(
'mtotal',
lambda m1,m2: m1+m2, (
'm1',
'm2') )
1066 if(
'a_spin1' in pos.names): pos.append_mapping(
'a1',
lambda a:a,
'a_spin1')
1067 if(
'a_spin2' in pos.names): pos.append_mapping(
'a2',
lambda a:a,
'a_spin2')
1068 if(
'phi_spin1' in pos.names): pos.append_mapping(
'phi1',
lambda a:a,
'phi_spin1')
1069 if(
'phi_spin2' in pos.names): pos.append_mapping(
'phi2',
lambda a:a,
'phi_spin2')
1070 if(
'theta_spin1' in pos.names): pos.append_mapping(
'theta1',
lambda a:a,
'theta_spin1')
1071 if(
'theta_spin2' in pos.names): pos.append_mapping(
'theta2',
lambda a:a,
'theta_spin2')
1073 my_ifos=[
'h1',
'l1',
'v1']
1074 for ifo1,ifo2
in combinations(my_ifos,2):
1075 p1=ifo1+
'_cplx_snr_arg'
1076 p2=ifo2+
'_cplx_snr_arg'
1077 if p1
in pos.names
and p2
in pos.names:
1078 delta=np.mod(pos[p1].samples - pos[p2].samples + np.pi ,2.0*np.pi)-np.pi
1091 if (
'ra' in pos.names
or 'rightascension' in pos.names) \
1092 and (
'declination' in pos.names
or 'dec' in pos.names) \
1093 and 'time' in pos.names:
1094 from lal
import LIGOTimeGPS, TimeDelayFromEarthCenter
1095 from numpy
import array
1096 detMap = {
'H1':
'LHO_4k',
'H2':
'LHO_2k',
'L1':
'LLO_4k',
1097 'G1':
'GEO_600',
'V1':
'VIRGO',
'T1':
'TAMA_300'}
1098 if 'ra' in pos.names:
1100 else: ra_name=
'rightascension'
1101 if 'dec' in pos.names:
1103 else: dec_name=
'declination'
1105 my_ifos=[
'H1',
'L1',
'V1']
1110 location = lal.cached_detector_by_prefix[ifo].location
1111 ifo_times[ifo]=array(list(map(
lambda ra,dec,time: array([time[0]+TimeDelayFromEarthCenter(location,ra[0],dec[0],LIGOTimeGPS(float(time[0])))]), pos[ra_name].samples,pos[dec_name].samples,pos[
'time'].samples)))
1112 loc_end_time=
PosteriorOneDPDF(ifo.lower()+
'_end_time',ifo_times[ifo],injected_value=inj_time)
1113 pos.append(loc_end_time)
1114 for ifo1
in my_ifos:
1115 for ifo2
in my_ifos:
1116 if ifo1==ifo2:
continue
1117 delay_time=ifo_times[ifo2]-ifo_times[ifo1]
1122 time_delay=
PosteriorOneDPDF(ifo1.lower()+ifo2.lower()+
'_delay',delay_time,inj_delay)
1123 pos.append(time_delay)
1125 print(
'Warning: Could not import lal python bindings, check you ./configured with --enable-swig-python')
1126 print(
'This means I cannot calculate time delays')
1129 new_spin_params = [
'tilt1',
'tilt2',
'theta_jn',
'beta']
1130 if not set(new_spin_params).issubset(set(pos.names)):
1131 old_params = [
'f_ref',mchirp_name,
'eta',
'iota',
'a1',
'theta1',
'phi1']
1132 if 'a2' in pos.names: old_params += [
'a2',
'theta2',
'phi2']
1134 pos.append_mapping(new_spin_params, spin_angles, old_params)
1136 print(
"Warning: Cannot find spin parameters. Skipping spin angle calculations.")
1139 if 'a1' in pos.names:
1140 if 'tilt1' in pos.names:
1141 pos.append_mapping(
'a1z',
lambda a, tilt: a*np.cos(tilt), (
'a1',
'tilt1'))
1143 pos.append_mapping(
'a1z',
lambda x: x,
'a1')
1145 if injection
is not None:
1146 inj_az = injection.spin1z
1147 pos[
'a1z'].set_injval(inj_az)
1149 pos.append_mapping(
'a1',
lambda x: np.abs(x),
'a1z')
1151 if 'a2' in pos.names:
1152 if 'tilt2' in pos.names:
1153 pos.append_mapping(
'a2z',
lambda a, tilt: a*np.cos(tilt), (
'a2',
'tilt2'))
1155 pos.append_mapping(
'a2z',
lambda x: x,
'a2')
1157 if injection
is not None:
1158 inj_az = injection.spin2z
1159 pos[
'a2z'].set_injval(inj_az)
1161 pos.append_mapping(
'a2',
lambda x: np.abs(x),
'a2z')
1164 if (
'm1' in pos.names
and 'a1z' in pos.names)
and (
'm2' in pos.names
and 'a2z' in pos.names):
1165 pos.append_mapping(
'chi_eff',
lambda m1,a1z,m2,a2z: (m1*a1z + m2*a2z) / (m1 + m2), (
'm1',
'a1z',
'm2',
'a2z'))
1168 if (
'm1' in pos.names
and 'a1' in pos.names
and 'tilt1' in pos.names)
and (
'm2' in pos.names
and 'a2' in pos.names
and 'tilt2' in pos.names):
1169 pos.append_mapping(
'chi_tot',
lambda m1,a1,m2,a2: (m1*a1 + m2*a2) / (m1 + m2), (
'm1',
'a1',
'm2',
'a2'))
1172 if (
'm1' in pos.names
and 'a1' in pos.names
and 'tilt1' in pos.names)
and (
'm2' in pos.names
and 'a2' in pos.names
and 'tilt2' in pos.names):
1173 pos.append_mapping(
'chi_p', chi_precessing, [
'm1',
'a1',
'tilt1',
'm2',
'a2',
'tilt2'])
1176 if(
'distance' in pos.names):
1177 pos.append_mapping(
'redshift', calculate_redshift,
'distance')
1178 elif(
'dist' in pos.names):
1179 pos.append_mapping(
'redshift', calculate_redshift,
'dist')
1181 elif(
'distance_maxl' in pos.names):
1182 pos.append_mapping(
'redshift_maxldist', calculate_redshift,
'distance_maxl')
1185 if (
'm1' in pos.names)
and (
'redshift' in pos.names):
1186 pos.append_mapping(
'm1_source', source_mass, [
'm1',
'redshift'])
1188 if (
'm2' in pos.names)
and (
'redshift' in pos.names):
1189 pos.append_mapping(
'm2_source', source_mass, [
'm2',
'redshift'])
1191 if (
'mtotal' in pos.names)
and (
'redshift' in pos.names):
1192 pos.append_mapping(
'mtotal_source', source_mass, [
'mtotal',
'redshift'])
1194 if (
'mc' in pos.names)
and (
'redshift' in pos.names):
1195 pos.append_mapping(
'mc_source', source_mass, [
'mc',
'redshift'])
1198 if (
'm1' in pos.names)
and (
'redshift_maxldist' in pos.names):
1199 pos.append_mapping(
'm1_source_maxldist', source_mass, [
'm1',
'redshift_maxldist'])
1201 if (
'm2' in pos.names)
and (
'redshift_maxldist' in pos.names):
1202 pos.append_mapping(
'm2_source_maxldist', source_mass, [
'm2',
'redshift_maxldist'])
1204 if (
'mtotal' in pos.names)
and (
'redshift_maxldist' in pos.names):
1205 pos.append_mapping(
'mtotal_source_maxldist', source_mass, [
'mtotal',
'redshift_maxldist'])
1207 if (
'mc' in pos.names)
and (
'redshift_maxldist' in pos.names):
1208 pos.append_mapping(
'mc_source_maxldist', source_mass, [
'mc',
'redshift_maxldist'])
1211 if (
'log10lambda_eff' in pos.names)
and (
'redshift' in pos.names):
1212 pos.append_mapping(
'log10lambda_a',
lambda z,nonGR_alpha,wl,dist:np.log10(
lambda_a(z, nonGR_alpha, 10**wl, dist)), [
'redshift',
'nonGR_alpha',
'log10lambda_eff',
'dist'])
1213 if (
'log10lambda_eff' in pos.names)
and (
'redshift' in pos.names):
1214 pos.append_mapping(
'log10livamp',
lambda z,nonGR_alpha,wl,dist:np.log10(
amplitudeMeasure(z, nonGR_alpha, 10**wl, dist)), [
'redshift',
'nonGR_alpha',
'log10lambda_eff',
'dist'])
1215 if (
'lambda_eff' in pos.names)
and (
'redshift' in pos.names):
1216 pos.append_mapping(
'lambda_a', lambda_a, [
'redshift',
'nonGR_alpha',
'log10lambda_eff',
'dist'])
1217 if (
'lambda_eff' in pos.names)
and (
'redshift' in pos.names):
1218 pos.append_mapping(
'liv_amp', amplitudeMeasure, [
'redshift',
'nonGR_alpha',
'lambda_eff',
'dist'])
1221 new_tidal_params = [
'lam_tilde',
'dlam_tilde']
1222 old_tidal_params = [
'lambda1',
'lambda2',
'q']
1223 if 'lambda1' in pos.names
or 'lambda2' in pos.names:
1225 pos.append_mapping(new_tidal_params, symm_tidal_params, old_tidal_params)
1227 print(
"Warning: Cannot find tidal parameters. Skipping tidal calculations.")
1230 old_spin_params = [
'iota',
'theta1',
'phi1',
'theta2',
'phi2',
'beta']
1231 new_spin_params = [
'theta_jn',
'phi_jl',
'tilt1',
'tilt2',
'phi12',
'a1',
'a2',
'm1',
'm2',
'f_ref',
'phase']
1233 if pos[
'f_ref'].samples[0][0]==0.0:
1234 for name
in [
'flow',
'f_lower']:
1235 if name
in pos.names:
1236 new_spin_params = [
'theta_jn',
'phi_jl',
'tilt1',
'tilt2',
'phi12',
'a1',
'a2',
'm1',
'm2', name]
1238 print(
"No f_ref for SimInspiralTransformPrecessingNewInitialConditions().")
1239 if set(new_spin_params).issubset(set(pos.names))
and not set(old_spin_params).issubset(set(pos.names)):
1240 pos.append_mapping(old_spin_params, physical2radiationFrame, new_spin_params)
1243 if 'spin1' in pos.names:
1244 inj_a1 = inj_a2 =
None
1246 inj_a1 = sqrt(injection.spin1x*injection.spin1x + injection.spin1y*injection.spin1y + injection.spin1z*injection.spin1z)
1247 inj_a2 = sqrt(injection.spin2x*injection.spin2x + injection.spin2y*injection.spin2y + injection.spin2z*injection.spin2z)
1250 a1_samps = abs(pos[
'spin1'].samples)
1254 print(
"Warning: problem accessing spin1 values.")
1257 a2_samps = abs(pos[
'spin2'].samples)
1261 print(
"Warning: no spin2 values found.")
1267 if len(np.intersect1d(pos.names,tidalParams)) == 0:
1271 FinalSpinFits = [
'HBR2016',
'UIB2016',
'HL2016']
1272 FinalMassFits = [
'UIB2016',
'HL2016']
1273 LpeakFits = [
'UIB2016',
'HL2016']
1277 spin_angle_suffix =
''
1278 evol_suffix =
'_evol'
1280 if all([x
in pos.names
for x
in [
'tilt1_isco',
'tilt2_isco',
'phi12_isco']]):
1281 spin_angle_suffix =
'_isco'
1282 elif all([x
in pos.names
for x
in [
'tilt1',
'tilt2',
'phi12']]):
1283 evol_suffix =
'_nonevol'
1285 zero_vec = np.array([0.])
1287 tilt1_name =
'tilt1' + spin_angle_suffix
1288 tilt2_name =
'tilt2' + spin_angle_suffix
1289 phi12_name =
'phi12' + spin_angle_suffix
1290 mf_name =
'mf' + evol_suffix
1291 mf_source_name =
'mf_source' + evol_suffix
1292 mf_source_maxldist_name =
'mf_source_maxldist' + evol_suffix
1294 if (
'm1' in pos.names)
and (
'm2' in pos.names):
1295 if (
'a1' in pos.names)
and (
'a2' in pos.names):
1296 if (tilt1_name
in pos.names)
and (tilt2_name
in pos.names)
and (phi12_name
in pos.names):
1298 print(
"Using averages of fit formulae for final mass, final spin, and peak luminosity (on masses and 3D spins).")
1299 if evol_suffix ==
'_evol':
1300 print(
"Applying these to *_isco evolved spin samples and outputting *_evol samples.")
1302 print(
"Applying these to unevolved spin samples and outputting *_nonevol samples.")
1303 print(
"Final mass fits:", FinalMassFits,
"; Final spin fits:", FinalSpinFits,
"; Peak luminosity fits:", LpeakFits)
1305 pos.append_mapping(
'af' + evol_suffix,
lambda m1, m2, chi1, chi2, tilt1, tilt2, phi12:
bbh_average_fits_precessing(m1, m2, chi1, chi2, tilt1, tilt2, phi12,
'af', FinalSpinFits), [
'm1',
'm2',
'a1',
'a2', tilt1_name, tilt2_name, phi12_name])
1306 except Exception
as e:
1307 print(
"Could not calculate %s. The error was: %s"%(
'af' + evol_suffix,
str(e)))
1309 pos.append_mapping(
'afz' + evol_suffix,
lambda m1, m2, chi1, chi2, tilt1, tilt2:
bbh_average_fits_precessing(m1, m2, chi1, chi2, tilt1, tilt2, zero_vec,
'afz', FinalSpinFits), [
'm1',
'm2',
'a1',
'a2', tilt1_name, tilt2_name])
1310 except Exception
as e:
1311 print(
"Could not calculate %s. The error was: %s"%(
'afz' + evol_suffix,
str(e)))
1313 pos.append_mapping(mf_name,
lambda m1, m2, chi1, chi2, tilt1, tilt2:
bbh_average_fits_precessing(m1, m2, chi1, chi2, tilt1, tilt2, zero_vec,
'Mf', FinalMassFits), [
'm1',
'm2',
'a1',
'a2', tilt1_name, tilt2_name])
1314 except Exception
as e:
1315 print(
"Could not calculate %s. The error was: %s"%(mf_name,
str(e)))
1317 pos.append_mapping(
'l_peak' + evol_suffix,
lambda m1, m2, chi1, chi2, tilt1, tilt2:
bbh_average_fits_precessing(m1, m2, chi1, chi2, tilt1, tilt2, zero_vec,
'Lpeak', LpeakFits), [
'm1',
'm2',
'a1',
'a2', tilt1_name, tilt2_name])
1318 except Exception
as e:
1319 print(
"Could not calculate %s. The error was: %s"%(
'l_peak' + evol_suffix,
str(e)))
1320 elif (
'a1z' in pos.names)
and (
'a2z' in pos.names):
1322 print(
"Using averages for final mass, final spin, and peak luminosity (on masses and projected spin components).")
1323 print(
"Outputting *_evol samples because spin evolution is trivial in this nonprecessing case.")
1324 print(
"Final mass fits:", FinalMassFits,
"; Final spin fits:", FinalSpinFits,
"; Peak luminosity fits:", LpeakFits)
1327 pos.append_mapping(
'afz_evol',
lambda m1, m2, chi1, chi2:
bbh_average_fits_precessing(m1, m2, abs(chi1), abs(chi2), 0.5*np.pi*(1. - np.sign(chi1)), 0.5*np.pi*(1. - np.sign(chi2)), zero_vec,
'afz', FinalSpinFits), [
'm1',
'm2',
'a1z',
'a2z'])
1328 except Exception
as e:
1329 print(
"Could not calculate afz_evol. The error was: %s"%(
str(e)))
1331 pos.append_mapping(
'af_evol',
lambda a: abs(a),
'afz_evol')
1332 except Exception
as e:
1333 print(
"Could not calculate af_evol. The error was: %s"%(
str(e)))
1335 pos.append_mapping(
'mf_evol',
lambda m1, m2, chi1, chi2:
bbh_average_fits_precessing(m1, m2, abs(chi1), abs(chi2), 0.5*np.pi*(1. - np.sign(chi1)), 0.5*np.pi*(1. - np.sign(chi2)), zero_vec,
'Mf', FinalMassFits), [
'm1',
'm2',
'a1z',
'a2z'])
1336 except Exception
as e:
1337 print(
"Could not calculate mf_evol. The error was: %s"%(
str(e)))
1339 pos.append_mapping(
'l_peak_evol',
lambda m1, m2, chi1, chi2:
bbh_average_fits_precessing(m1, m2, abs(chi1), abs(chi2), 0.5*np.pi*(1. - np.sign(chi1)), 0.5*np.pi*(1. - np.sign(chi2)), zero_vec,
'Lpeak', LpeakFits), [
'm1',
'm2',
'a1z',
'a2z'])
1340 except Exception
as e:
1341 print(
"Could not calculate l_peak_evol. The error was: %s"%(
str(e)))
1343 print(
"Could not calculate final parameters or Lpeak. Found samples for a1 and a2 but not for tilt angles and phi12 or spin components (a1z and a2z).")
1346 print(
"Using averages of fit formulae for final mass, final spin, and peak luminosity (on masses and zero spins).")
1347 print(
"Outputting *_evol samples because spin evolution is trivial in this nonspinning case.")
1348 print(
"Final mass fits:", FinalMassFits,
"; Final spin fits:", FinalSpinFits,
"; Peak luminosity fits:", LpeakFits)
1350 pos.append_mapping(
'afz_evol',
lambda m1, m2:
bbh_average_fits_precessing(m1, m2, zero_vec, zero_vec, zero_vec, zero_vec, zero_vec,
'afz', FinalSpinFits), [
'm1',
'm2'])
1351 except Exception
as e:
1352 print(
"Could not calculate afz_evol. The error was: %s"%(
str(e)))
1354 pos.append_mapping(
'af_evol',
lambda a: abs(a),
'afz_evol')
1355 except Exception
as e:
1356 print(
"Could not calculate af_evol. The error was: %s"%(
str(e)))
1358 pos.append_mapping(
'mf_evol',
lambda m1, m2:
bbh_average_fits_precessing(m1, m2, zero_vec, zero_vec, zero_vec, zero_vec, zero_vec,
'Mf', FinalMassFits), [
'm1',
'm2'])
1359 except Exception
as e:
1360 print(
"Could not calculate mf_evol. The error was: %s"%(
str(e)))
1362 pos.append_mapping(
'l_peak_evol',
lambda m1, m2:
bbh_average_fits_precessing(m1, m2, zero_vec, zero_vec, zero_vec, zero_vec, zero_vec,
'Lpeak', LpeakFits), [
'm1',
'm2'])
1363 except Exception
as e:
1364 print(
"Could not calculate l_peak_evol. The error was: %s"%(
str(e)))
1367 if (mf_name
in pos.names)
and (
'redshift' in pos.names):
1369 pos.append_mapping(mf_source_name, source_mass, [mf_name,
'redshift'])
1370 except Exception
as e:
1371 print(
"Could not calculate final source frame mass. The error was: %s"%(
str(e)))
1373 if (mf_name
in pos.names)
and (
'redshift_maxldist' in pos.names):
1375 pos.append_mapping(mf_source_maxldist_name, source_mass, [mf_name,
'redshift_maxldist'])
1376 except Exception
as e:
1377 print(
"Could not calculate final source frame mass using maxldist redshift. The error was: %s"%(
str(e)))
1380 if (
'mtotal_source' in pos.names)
and (mf_source_name
in pos.names):
1382 pos.append_mapping(
'e_rad' + evol_suffix,
lambda mtot_s, mf_s: mtot_s-mf_s, [
'mtotal_source', mf_source_name])
1383 except Exception
as e:
1384 print(
"Could not calculate radiated energy. The error was: %s"%(
str(e)))
1386 if (
'mtotal_source_maxldist' in pos.names)
and (mf_source_maxldist_name
in pos.names):
1388 pos.append_mapping(
'e_rad_maxldist' + evol_suffix,
lambda mtot_s, mf_s: mtot_s-mf_s, [
'mtotal_source_maxldist', mf_source_maxldist_name])
1389 except Exception
as e:
1390 print(
"Could not calculate radiated energy using maxldist redshift results. The error was: %s"%(
str(e)))
1394 Returns a new Posterior object that contains a bootstrap
1402 samples.append(oneDpos.samples)
1404 samplesBlock=np.hstack(samples)
1406 bootstrapSamples=samplesBlock[:,:]
1407 Nsamp=bootstrapSamples.shape[0]
1409 rows=np.vsplit(samplesBlock,Nsamp)
1411 for i
in range(Nsamp):
1412 bootstrapSamples[i,:]=random.choice(rows)
1418 Remove samples from all OneDPosteriors.
1420 @param samples: The indexes of the samples to be removed.
1422 for name,pos
in self:
1423 pos.delete_samples_by_idx(samples)
1428 Remove samples containing NaN in request params.
1430 @param param_list: The parameters to be checked
for NaNs.
1432 nan_idxs = np.array(())
1434 for param
in param_list:
1435 nan_bool_array = np.isnan(self[param].samples).any(1)
1436 idxs = np.where(nan_bool_array ==
True)[0]
1438 nan_dict[param]=len(idxs)
1439 nan_idxs = np.append(nan_idxs, idxs)
1440 total_samps = len(self)
1441 nan_samps = len(nan_idxs)
1443 print(
"WARNING: removing %i of %i total samples due to NaNs:"% (nan_samps,total_samps))
1444 for param
in nan_dict.keys():
1445 print(
"\t%i NaNs in %s."%(nan_dict[param],param))
1451 """Returns the Deviance Information Criterion estimated from the
1452 posterior samples. The DIC is defined
as -2*(<log(L)> -
1453 Var(log(L))); smaller values are
"better."
1457 return -2.0*(np.mean(self.
_logL) - np.var(self.
_logL))
1462 Return the injected values.
1471 Return the trigger values .
1477 def _total_incl_restarts(self, samples):
1480 for x
in samples[1:]:
1484 total += samples[-1]
1489 Returns the number of cycles in the longest chain
1493 header=header.split()
1494 if not (
'cycle' in header):
1495 raise RuntimeError(
"Cannot compute number of cycles in longest chain")
1497 cycle_col=header.index(
'cycle')
1498 if 'chain' in header:
1499 chain_col=header.index(
'chain')
1500 chain_indexes=np.unique(samps[:,chain_col])
1502 for ind
in chain_indexes:
1503 chain_cycle_samps=samps[ samps[:,chain_col] == ind, cycle_col ]
1505 return int(max_cycle)
1512 Set the injected values of the parameters.
1514 @param injection: A SimInspiralTable row object containing the injected parameters.
1516 if injection
is not None:
1518 for name,onepos
in self:
1520 if new_injval
is not None:
1521 self[name].set_injval(new_injval)
1525 Set the trigger values of the parameters.
1527 @param triggers: A list of SnglInspiral objects.
1529 if triggers
is not None:
1531 for name,onepos
in self:
1533 if new_trigvals
is not None:
1534 self[name].set_trigvals(new_trigvals)
1537 def _getinjpar(self,paramname):
1539 Map parameter names to parameters in a SimInspiralTable .
1543 if paramname.lower().strip() == key.lower().strip():
1550 def _gettrigpar(self,paramname):
1552 Map parameter names to parameters in a SnglInspiral.
1557 if paramname.lower().strip() == key.lower().strip():
1562 except AttributeError:
1568 Container method . Returns posterior chain,one_d_pos, with name one_d_pos.name.
1574 Container method. Defined as number of samples.
1576 return len(self.
_logL)
1580 Container method. Returns iterator from self.
forward for use
in
1581 for (...)
in (...) etc.
1587 Generate a forward iterator (in sense of list of names) over Posterior
1588 with name,one_d_pos.
1591 while current_item < self.
dim:
1592 name=list(self.
_posterior.keys())[current_item]
1599 Generate a forward iterator over the list of samples corresponding to
1600 the data stored within the Posterior instance. These are returned as
1601 ParameterSamples instances.
1605 while current_item < len(self):
1606 sample_array=(np.squeeze(pos_array[current_item,:]))
1614 Return number of parameters.
1621 Return list of parameter names.
1624 for key,value
in self:
1625 nameslist.append(key)
1631 Return dict {paramName:paramMean} .
1634 for name,pos
in self:
1635 meansdict[name]=pos.mean
1641 Return dict {paramName:paramMedian} .
1644 for name,pos
in self:
1645 mediansdict[name]=pos.median
1651 Return dict {paramName:paramStandardDeviation} .
1654 for name,pos
in self:
1655 stdsdict[name]=pos.stdev
1661 Return qualified string containing the 'name' of the Posterior instance.
1668 Return qualified string containing a 'description' of the Posterior instance.
def append(self,one_d_posterior):
    """
    Container method. Add a new OneDParameter to the Posterior instance.

    @param one_d_posterior: a PosteriorOneDPDF-like object with a ``name``
        attribute; it is stored keyed on that name, replacing any existing
        entry with the same name.
    """
    self._posterior[one_d_posterior.name]=one_d_posterior
    return
def pop(self,param_name):
    """
    Container method. Remove PosteriorOneDPDF from the Posterior instance.

    @param param_name: name of the one-D posterior to remove.
    @return: the removed PosteriorOneDPDF instance.
    @raise KeyError: if no posterior with that name is stored.
    """
    return self._posterior.pop(param_name)
1687 Append posteriors pos1,pos2,...=func(post_names)
1692 if isinstance(post_names, str):
1693 old_post = copy.deepcopy(self[post_names])
1694 old_inj = old_post.injval
1695 old_trigs = old_post.trigvals
1697 new_inj =
func(old_inj)
1702 for IFO
in old_trigs.keys():
1703 new_trigs[IFO] =
func(old_trigs[IFO])
1707 samps =
func(old_post.samples)
1708 new_post =
PosteriorOneDPDF(new_param_names, samps, injected_value=new_inj, trigger_values=new_trigs)
1709 if new_post.samples.ndim == 0:
1710 print(
"WARNING: No posterior calculated for %s ..." % new_post.name)
1715 old_posts = [copy.deepcopy(self[post_name])
for post_name
in post_names]
1716 old_injs = [post.injval
for post
in old_posts]
1717 old_trigs = [post.trigvals
for post
in old_posts]
1718 samps =
func(*[post.samples
for post
in old_posts])
1720 if isinstance(new_param_names, str):
1721 if None not in old_injs:
1722 inj =
func(*old_injs)
1725 if None not in old_trigs:
1727 for IFO
in old_trigs[0].keys():
1728 oldvals = [param[IFO]
for param
in old_trigs]
1729 new_trigs[IFO] =
func(*oldvals)
1732 new_post =
PosteriorOneDPDF(new_param_names, samps, injected_value=inj, trigger_values=new_trigs)
1736 if None not in old_injs:
1737 injs =
func(*old_injs)
1739 injs = [
None for name
in new_param_names]
1740 if None not in old_trigs:
1741 new_trigs = [{}
for param
in range(len(new_param_names))]
1742 for IFO
in old_trigs[0].keys():
1743 oldvals = [param[IFO]
for param
in old_trigs]
1744 newvals =
func(*oldvals)
1745 for param,newval
in enumerate(newvals):
1746 new_trigs[param][IFO] = newval
1748 new_trigs = [
None for param
in range(len(new_param_names))]
1749 if not samps: return()
1750 new_posts = [
PosteriorOneDPDF(new_param_name,samp,injected_value=inj,trigger_values=new_trigs)
for (new_param_name,samp,inj,new_trigs)
in zip(new_param_names,samps,injs,new_trigs)]
1751 for post
in new_posts:
1752 if post.samples.ndim == 0:
1753 print(
"WARNING: No posterior calculated for %s ..." % post.name)
1758 def _average_posterior(self, samples, post_name):
1760 Returns the average value of the 'post_name' column of the
1764 for samp
in samples:
1765 ap = ap + samp[post_name]
1766 return ap / len(samples)
1768 def _average_posterior_like_prior(self, samples, logl_name, prior_name, log_bias = 0):
1770 Returns the average value of the posterior assuming that the
1771 'logl_name' column contains log(L)
and the
'prior_name' column
1772 contains the prior (un-logged).
1775 for samp
in samples:
1776 ap += np.exp(samp[logl_name]-log_bias)*samp[prior_name]
1777 return ap / len(samples)
1779 def _bias_factor(self):
1781 Returns a sensible bias factor for the evidence so that
1782 integrals are representable
as doubles.
1784 return np.mean(self.
_logL)
1788 Returns the log of the direct-integration evidence for the
1791 allowed_coord_names=["spin1",
"spin2",
"a1",
"phi1",
"theta1",
"a2",
"phi2",
"theta2",
1792 "iota",
"psi",
"ra",
"dec",
1793 "phi_orb",
"phi0",
"dist",
"time",
"mc",
"mchirp",
"chirpmass",
"q"]
1795 header=header.split()
1796 coord_names=[name
for name
in allowed_coord_names
if name
in header]
1797 coordinatized_samples=[
PosteriorSample(row, header, coord_names)
for row
in samples]
1798 tree=
KDTree(coordinatized_samples)
1800 if "prior" in header
and "logl" in header:
1803 elif "prior" in header
and "likelihood" in header:
1806 elif "post" in header:
1807 return np.log(tree.integrate(
lambda samps: self.
_average_posterior(samps,
"post"), boxing))
1808 elif "posterior" in header:
1809 return np.log(tree.integrate(
lambda samps: self.
_average_posterior(samps,
"posterior"), boxing))
1811 raise RuntimeError(
"could not find 'post', 'posterior', 'logl' and 'prior', or 'likelihood' and 'prior' columns in output to compute direct integration evidence")
1814 """Returns an approximation to the log(evidence) obtained by
1815 fitting an ellipse around the highest-posterior samples and
1816 performing the harmonic mean approximation within the ellipse.
1817 Because the ellipse should be well-sampled, this provides a
1818 better approximation to the evidence than the full-domain HM.
"""
1819 allowed_coord_names=["spin1",
"spin2",
"a1",
"phi1",
"theta1",
"a2",
"phi2",
"theta2",
1820 "iota",
"psi",
"ra",
"dec",
1821 "phi_orb",
"phi0",
"dist",
"time",
"mc",
"mchirp",
"chirpmass",
"q"]
1823 header=header.split()
1825 n=
int(0.05*samples.shape[0])
1829 coord_names=[name
for name
in allowed_coord_names
if name
in header]
1830 indexes=np.argsort(self.
_logL[:,0])
1832 my_samples=samples[indexes[-n:], :]
1833 my_samples=np.array([
PosteriorSample(sample,header,coord_names).coord()
for sample
in my_samples])
1835 mu=np.mean(my_samples, axis=0)
1836 cov=np.cov(my_samples, rowvar=0)
1839 for mysample
in my_samples:
1840 d=np.dot(mysample-mu, np.linalg.solve(cov, mysample-mu))
1848 for sample,logl
in zip(samples, self.
_logL):
1850 d=np.dot(coord-mu, np.linalg.solve(cov, coord-mu))
1853 ellipse_logl.append(logl)
1854 ellipse_samples.append(sample)
1856 if len(ellipse_samples) > 5*n:
1857 print(
'WARNING: ellpise evidence region encloses significantly more samples than %d'%n)
1859 ellipse_samples=np.array(ellipse_samples)
1860 ellipse_logl=np.array(ellipse_logl)
1862 ndim = len(coord_names)
1863 ellipse_volume=np.pi**(ndim/2.0)*d0**(ndim/2.0)/special.gamma(ndim/2.0+1)*np.sqrt(np.linalg.det(cov))
1866 prior_index=header.index(
'prior')
1867 pmu=np.mean(ellipse_samples[:,prior_index])
1868 pstd=np.std(ellipse_samples[:,prior_index])
1870 print(
'WARNING: prior variation greater than 100\\% over elliptical volume.')
1871 approx_prior_integral=ellipse_volume*pmu
1874 approx_prior_integral=ellipse_volume
1876 ll_bias=np.mean(ellipse_logl)
1877 ellipse_logl = ellipse_logl - ll_bias
1879 return np.log(approx_prior_integral) - np.log(np.mean(1.0/np.exp(ellipse_logl))) + ll_bias
1883 Returns the log of the harmonic mean evidence for the set of
1887 return bf + np.log(1/np.mean(1/np.exp(self.
_logL-bf)))
1891 Find the sample with maximum likelihood probability. Returns value
1892 of likelihood
and index of sample .
1894 logl_vals=self._logL
1896 max_logl=logl_vals[0]
1897 for i
in range(len(logl_vals)):
1898 if logl_vals[i] > max_logl:
1899 max_logl=logl_vals[i]
1901 return max_logl,max_i
1905 Find the sample with maximum a posteriori probability. Returns value
1906 of posterior
and index of sample .
1908 logl_vals=self._logL
1909 if self.
_logP is not None:
1910 logp_vals=self.
_logP
1915 max_pos=logl_vals[0]+logp_vals[0]
1916 for i
in range(len(logl_vals)):
1917 if logl_vals[i]+logp_vals[i] > max_pos:
1918 max_pos=logl_vals[i]+logp_vals[i]
1920 return max_pos,max_i
1922 def _print_table_row(self,name,entries):
1924 Print a html table row representation of
1926 name:item1,item2,item3,...
1929 row_str='<tr><td>%s</td>'%name
1930 for entry
in entries:
1931 row_str+=
'<td>%s</td>'%entry
1938 Return the maximum likelihood probability and the corresponding
1943 for param_name
in self.
names:
1944 maxLvals[param_name]=self.
_posterior[param_name].samples[max_i][0]
1946 return (max_logl,maxLvals)
1951 Return the maximum a posteriori probability and the corresponding
1956 for param_name
in self.
names:
1957 maxPvals[param_name]=self.
_posterior[param_name].samples[max_i][0]
1959 return (max_pos,maxPvals)
1964 Return an (M,N) numpy.array of posterior samples; M = len(self);
1969 for param_name,one_pos
in self:
1970 column=np.array(one_pos.samples)
1971 header_string+=param_name+
'\t'
1972 posterior_table.append(column)
1973 posterior_table=tuple(posterior_table)
1974 return np.column_stack(posterior_table),header_string
1978 Dump the posterior table to a file in the
'common format'.
1980 posterior_table, header_string = self.samples()
1986 header=header_string,
1991 Returns an approximation to the Gelman-Rubin statistic (see
1992 Gelman, A. and Rubin, D. B., Statistical Science, Vol 7,
1993 No. 4, pp. 457--511 (1992))
for the parameter given, accurate
1994 as the number of samples
in each chain goes to infinity. The
1995 posterior samples must have a column named
'chain' so that the
1996 different chains can be separated.
1998 from numpy
import seterr
as np_seterr
1999 np_seterr(all=
'raise')
2001 if "chain" in self.
names:
2002 chains=np.unique(self[
"chain"].samples)
2003 chain_index=self.
names.index(
"chain")
2004 param_index=self.
names.index(pname)
2006 chainData=[data[ data[:,chain_index] == chain, param_index]
for chain
in chains]
2007 allData=np.concatenate(chainData)
2008 chainMeans=[np.mean(data)
for data
in chainData]
2009 chainVars=[np.var(data)
for data
in chainData]
2010 BoverN=np.var(chainMeans)
2011 W=np.mean(chainVars)
2012 sigmaHat2=W + BoverN
2014 VHat=sigmaHat2 + BoverN/m
2018 print(
"Error when computer Gelman-Rubin R statistic for %s. This may be a fixed parameter"%pname)
2022 raise RuntimeError(
'could not find necessary column header "chain" in posterior samples')
2025 """Returns a healpix map in the pixel ordering that represents the
2026 posterior density (per square degree) on the sky. The pixels
2027 will be chosen to have at least the given resolution (in
2034 while hp.nside2resol(nside, arcmin=
True) > resol*60.0/2.0:
2037 ras = self[
'ra'].samples.squeeze()
2038 decs = self[
'dec'].samples.squeeze()
2041 thetas = np.pi/2.0 - decs
2044 inds = hp.ang2pix(nside, thetas, phis, nest=
False)
2045 counts = np.bincount(inds)
2046 if counts.shape[0] < hp.nside2npix(nside):
2047 counts = np.concatenate((counts, np.zeros(hp.nside2npix(nside) - counts.shape[0])))
2050 hpmap = hp.sphtfunc.smoothing(counts, sigma=resol*np.pi/180.0)
2052 hpmap = hpmap / (np.sum(hpmap)*hp.nside2pixarea(nside, degrees=
True))
2055 hpmap = hp.reorder(hpmap, r2n=
True)
2061 Define a string representation of the Posterior class ; returns
2062 a html formatted table of various properties of posteriors.
2064 return_val='<table border="1" id="statstable"><tr><th/>'
2065 column_names=[
'maP',
'maxL',
'stdev',
'mean',
'median',
'stacc',
'injection value']
2068 IFOs = [trig.ifo
for trig
in self.
_triggers]
2070 column_names.append(IFO+
' trigger values')
2072 for column_name
in column_names:
2073 return_val+=
'<th>%s</th>'%column_name
2077 for name,oned_pos
in self:
2080 maxL=oned_pos.samples[max_i][0]
2082 maP=oned_pos.samples[max_j][0]
2083 mean=
str(oned_pos.mean)
2084 stdev=
str(oned_pos.stdev)
2085 median=
str(np.squeeze(oned_pos.median))
2086 stacc=
str(oned_pos.stacc)
2087 injval=
str(oned_pos.injval)
2088 trigvals=oned_pos.trigvals
2090 row = [maP,maxL,stdev,mean,median,stacc,injval]
2094 row.append(
str(trigvals[IFO]))
2099 return_val+=
'</table>'
2102 parser.feed(return_val)
2106 rough_string = tostring(elem,
'utf-8')
2107 reparsed = minidom.parseString(rough_string)
2108 return_val=reparsed.toprettyxml(indent=
" ")
2109 return return_val[len(
'<?xml version="1.0" ?>')+1:]
def _inj_m1(self,inj):
    """
    Return the mapping of (mchirp,eta)->m1; m1>m2 i.e. return the greater of the mass
    components (m1) calculated from the chirp mass and the symmetric mass ratio.

    @param inj: a custom type with the attributes 'mchirp' and 'eta'.
    """
    # mc2ms (defined elsewhere in this module) returns (m1, m2) with m1 >= m2.
    (mass1,mass2)=mc2ms(inj.mchirp,inj.eta)
    return mass1
def _inj_m2(self,inj):
    """
    Return the mapping of (mchirp,eta)->m2; m1>m2 i.e. return the lesser of the mass
    components (m2) calculated from the chirp mass and the symmetric mass ratio.

    @param inj: a custom type with the attributes 'mchirp' and 'eta'.
    """
    # mc2ms (defined elsewhere in this module) returns (m1, m2) with m1 >= m2.
    (mass1,mass2)=mc2ms(inj.mchirp,inj.eta)
    return mass2
def _inj_q(self,inj):
    """
    Return the mapping of (mchirp,eta)->q; m1>m2 i.e. return the mass ratio q=m2/m1.

    @param inj: a custom type with the attributes 'mchirp' and 'eta'.
    """
    # mc2ms gives m1 >= m2, so q = m2/m1 lies in (0, 1].
    (mass1,mass2)=mc2ms(inj.mchirp,inj.eta)
    return mass2/mass1
2144 def _inj_longitude(self,inj):
2146 Return the mapping of longitude found in inj to the interval [0,2*pi).
2148 @param inj: a custom type
with the attribute
'longitude'.
2150 if inj.longitude>2*pi_constant
or inj.longitude<0.0:
2151 maplong=2*pi_constant*(((float(inj.longitude))/(2*pi_constant)) - floor(((float(inj.longitude))/(2*pi_constant))))
2152 print(
"Warning: Injected longitude/ra (%s) is not within [0,2\\pi)! Angles are assumed to be in radians so this will be mapped to [0,2\\pi). Mapped value is: %s."%(
str(inj.longitude),
str(maplong)))
2155 return inj.longitude
2157 def _inj_spins(self, inj, frame='OrbitalL'):
2159 from lalsimulation
import SimInspiralTransformPrecessingWvf2PE
2168 axis = lalsim.SimInspiralGetFrameAxisFromString(frame)
2175 iota=inj.inclination
2176 m1, m2 = inj.mass1, inj.mass2
2177 mc, eta = inj.mchirp, inj.eta
2179 a1, theta1, phi1 =
cart2sph(s1x, s1y, s1z)
2180 a2, theta2, phi2 =
cart2sph(s2x, s2y, s2z)
2182 spins = {
'a1':a1,
'theta1':theta1,
'phi1':phi1,
2183 'a2':a2,
'theta2':theta2,
'phi2':phi2,
2186 if inj.spin1x == inj.spin1y == inj.spin2x == inj.spin2y == 0.:
2187 spins[
'a1z'] = inj.spin1z
2188 spins[
'a2z'] = inj.spin2z
2191 S1 = np.hstack((s1x, s1y, s1z))
2192 S2 = np.hstack((s2x, s2y, s2z))
2194 zhat = np.array([0., 0., 1.])
2195 aligned_comp_spin1 =
array_dot(S1, zhat)
2196 aligned_comp_spin2 =
array_dot(S2, zhat)
2197 chi = aligned_comp_spin1 + aligned_comp_spin2 + \
2198 np.sqrt(1. - 4.*eta) * (aligned_comp_spin1 - aligned_comp_spin2)
2204 spins[
'beta'] = beta
2205 spins[
'spinchi'] = chi
2209 if not frame==
'OrbitalL':
2210 print(
"I cannot calculate the injected values of the spin angles unless frame is OrbitalL. Skipping...")
2213 theta_jn,phi_jl,tilt1,tilt2,phi12,chi1,chi2=SimInspiralTransformPrecessingWvf2PE(inj.inclination,inj.spin1x, inj.spin1y, inj.spin1z,inj.spin2x, inj.spin2y, inj.spin2z, m1, m2, f_ref, inj.coa_phase)
2214 spins[
'theta_jn']=theta_jn
2215 spins[
'phi12']=phi12
2216 spins[
'tilt1']=tilt1
2217 spins[
'tilt2']=tilt2
2218 spins[
'phi_jl']=phi_jl
2223 iota_back,a1x_back,a1y_back,a1z_back,a2x_back,a2y_back,a2z_back = \
2224 lalsim.SimInspiralTransformPrecessingNewInitialConditions(theta_jn,phi_jl,tilt1,tilt2,phi12,chi1,chi2,m1*lal.MSUN_SI,m2*lal.MSUN_SI,f_ref,inj.coa_phase)
2225 print(a1x_back,a1y_back,a1z_back)
2226 print(a2x_back,a2y_back,a2z_back)
2234 Data structure for a table of posterior samples .
2236 def __init__(self,commonResultsFormatData,SimBurstTableEntry=None,injFref=None,SnglBurstList=None,name=None,description=None):
2240 @param commonResultsFormatData: A 2D array containing the posterior
2241 samples
and related data. The samples chains form the columns.
2242 @param SimBurstTableEntry: A ligolw.lscstables.SimBurst row containing the injected values.
2243 @param SnglBurstList: A list of SnglBurst objects containing the triggers.
2244 @param injFref: reference frequency
in injection
2245 @param name: optional name
for this Posterior
2246 @param description: optional description
for this Posterior
2248 common_output_table_header,common_output_table_raw =commonResultsFormatData
2256 common_output_table_header=[i.lower()
for i
in common_output_table_header]
2260 'f0':
lambda inj:inj.frequency,
2261 'frequency':
lambda inj:inj.frequency,
2262 'centre_frequency':
lambda inj:inj.frequency,
2263 'quality':
lambda inj:inj.q,
2264 'hrss':
lambda inj:inj.hrss,
2265 'loghrss':
lambda inj:np.log(inj.hrss),
2266 'polar_angle':
lambda inj:inj.pol_ellipse_angle,
2267 'pol_ellipse_angle':
lambda inj:inj.pol_ellipse_angle,
2268 'pol_ellipse_e':
lambda inj:inj.pol_ellipse_e,
2269 'alpha':
lambda inj:inj.pol_ellipse_angle,
2270 'polar_eccentricity':
lambda inj:inj.pol_ellipse_e,
2271 'eccentricity':
lambda inj:inj.pol_ellipse_e,
2272 'time':
lambda inj:float(
get_end(inj)),
2273 'end_time':
lambda inj:float(
get_end(inj)),
2278 'dec':
lambda inj:inj.dec,
2279 'declination':
lambda inj:inj.dec,
2280 'lat':
lambda inj:inj.dec,
2281 'latitude':
lambda inj:inj.dec,
2282 'psi':
lambda inj: np.mod(inj.psi, np.pi),
2284 'polarisation':
lambda inj:inj.psi,
2285 'polarization':
lambda inj:inj.psi,
2286 'duration':
lambda inj:inj.duration,
2292 for one_d_posterior_samples,param_name
in zip(np.hsplit(common_output_table_raw,common_output_table_raw.shape[1]),common_output_table_header):
2299 if loglalias
in common_output_table_header:
2303 print(
"No '%s' column in input table!"%loglalias)
2308 raise RuntimeError(
"No likelihood/posterior values found!")
2312 if logpalias
in common_output_table_header:
2316 print(
"No '%s' column in input table!"%logpalias)
2318 if not 'log' in logpalias:
2320 if name
is not None:
2323 if description
is not None:
2331 def _inj_longitude(self,inj):
2333 Return the mapping of longitude found in inj to the interval [0,2*pi).
2335 @param inj: a custom type
with the attribute
'longitude'.
2337 if inj.ra>2*pi_constant
or inj.ra<0.0:
2338 maplong=2*pi_constant*(((float(inj.ra)/(2*pi_constant)) - floor(((float(inj.ra))/(2*pi_constant)))))
2339 print(
"Warning: Injected longitude/ra (%s) is not within [0,2\\pi)! Angles are assumed to be in radians so this will be mapped to [0,2\\pi). Mapped value is: %s."%(
str(inj.ra),
str(maplong)))
2350 Construct a kD-tree from a sequence of objects. Each object
2351 should
return its coordinates using obj.coord().
2353 if len(objects) == 0:
2354 raise RuntimeError(
"cannot construct kD-tree out of zero objects---you may have a repeated sample in your list")
2355 elif len(objects) == 1:
2370 sorted_objects=sorted(self.
_objects, key=
lambda obj: (obj.coord())[longest_dim])
2371 N = len(sorted_objects)
2372 bound=0.5*(sorted_objects[
int(N/2)].coord()[longest_dim] + sorted_objects[
int(N/2)-1].coord()[longest_dim])
2373 low = [obj
for obj
in self.
_objects if obj.coord()[longest_dim] < bound]
2374 high = [obj
for obj
in self.
_objects if obj.coord()[longest_dim] >= bound]
2378 low = [obj
for obj
in self.
_objects if obj.coord()[longest_dim]==bound]
2379 high = [obj
for obj
in self.
_objects if obj.coord()[longest_dim] > bound]
2383 def _same_coords(self, objects):
2385 True if and only
if all the given objects have the same
2388 if len(objects) <= 1:
2390 coords = [obj.coord()
for obj
in objects]
2392 for ci
in coords[1:]:
2393 if not np.all(ci == c0):
2397 def _bounds_of_objects(self):
2399 Bounds of the objects contained in the tree.
2404 low=np.minimum(low,obj.coord())
2405 high=np.maximum(high,obj.coord())
2408 def _longest_dimension(self):
2410 Longest dimension of the tree bounds.
2414 return np.argmax(widths)
2418 Returns the objects in the tree.
2424 Iterator over all the objects contained in the tree.
2430 Returns the left tree.
2436 Returns the right tree.
2442 Returns the dimension along which this level of the kD-tree
2449 Returns the coordinates of the lower-left and upper-right
2450 corners of the bounding box
for this tree: low_left, up_right
2456 Returns the volume of the bounding box of the tree.
2460 for l,h
in zip(low,high):
2466 Returns the integral of f(objects) over the tree. The
2467 optional boxing parameter determines how deep to descend into
2468 the tree before computing f.
2476 return tree.volume()*f(tree._objects)
2481 return self.
operate(x,y,boxing=boxing)
2483 def operate(self,f,g,boxing=64):
2485 Operates on tree nodes exceeding boxing parameter depth.
2496 A kD-tree suitable for splitting parameter spaces
and counting hypervolumes.
2497 Is modified
from the KDTree
class so that bounding boxes are stored. This means that
2498 there are no longer gaps
in the hypervolume once the samples have been split into groups.
2500 def __init__(self, objects,boundingbox,dims=0):
2502 Construct a kD-tree from a sequence of objects. Each object
2503 should
return its coordinates using obj.coord().
2504 the obj should also store the bounds of the hypervolume its found
in.
2505 for non-leaf objects we need the name of the dimension split
and value at split.
2510 if len(objects) == 0:
2511 raise RuntimeError(
"cannot construct kD-tree out of zero objects---you may have a repeated sample in your list")
2512 elif len(objects) == 1:
2521 sorted_objects=sorted(self.
_objects, key=
lambda obj: (obj.coord())[split_dim])
2522 N = len(sorted_objects)
2523 self.
_split_value = 0.5*(sorted_objects[
int(N/2)].coord()[split_dim] + sorted_objects[
int(N/2)-1].coord()[split_dim])
2525 low = [obj
for obj
in self.
_objects if obj.coord()[split_dim] < bound]
2526 high = [obj
for obj
in self.
_objects if obj.coord()[split_dim] >= bound]
2530 low = [obj
for obj
in self.
_objects if obj.coord()[split_dim] == bound]
2531 high = [obj
for obj
in self.
_objects if obj.coord()[split_dim] > bound]
2532 leftBoundingbox = []
2533 rightBoundingbox = []
2535 leftBoundingbox.append(list(i))
2536 rightBoundingbox.append(list(i))
2537 leftBoundingbox[1][split_dim] = bound
2538 rightBoundingbox[0][split_dim] = bound
2542 if (split_dim < (len(self.
_objects[0].coord()) - 1)):
2543 child_dim = split_dim + 1
2548 if (len(high) != 0):
2553 def _same_coords(self, objects):
2555 True if and only
if all the given objects have the same
2558 if len(objects) <= 1:
2560 coords = [obj.coord()
for obj
in objects]
2562 for ci
in coords[1:]:
2563 if not np.all(ci == c0):
2569 Returns the objects in the tree.
2575 Iterator over all the objects contained in the tree.
2581 Returns the left tree.
2587 Returns the right tree.
2593 Returns the dimension along which this level of the kD-tree
2596 return self._split_dim
2600 Returns the coordinates of the lower-left and upper-right
2601 corners of the bounding box
for this tree: low_left, up_right
2607 Returns the volume of the bounding box of the tree.
2611 for l,h
in zip(low,high):
2617 Returns the integral of f(objects) over the tree. The
2618 optional boxing parameter determines how deep to descend into
2619 the tree before computing f.
2622 return tree.volume()*f(tree._objects)
2627 return self.
operate(x,y,boxing=boxing)
2629 def operate(self,f,g,boxing=64):
2631 Operates on tree nodes exceeding boxing parameter depth.
2638 def search(self,coordinates,boxing = 64):
2640 takes a set of coordinates and searches down through the tree untill it gets
2641 to a box
with less than
'boxing' objects
in it
and returns the box bounds,
2642 number of objects
in the box,
and the weighting.
2651 def fillNewTree(self,boxing = 64, isArea = False):
2653 copies tree structure, but with KDSkeleton
as the new nodes.
2657 newNode =
KDSkeleton(self.
bounds(), left_child =
None , right_child =
None)
2675 object to store the structure of a kd tree
2678 def __init__(self, bounding_box, left_child = None, right_child = None):
2681 self.
_left = left_child
2682 self.
_right = right_child
2695 def search(self,coordinates):
2697 takes a set of coordinates and searches down through the tree untill it gets
2698 to a box
with less than
'boxing' objects
in it
and returns the box bounds,
2699 number of objects
in the box,
and the weighting.
2701 if self.
_left is None:
2719 A single parameter sample object, suitable for inclusion
in a
2723 def __init__(self, sample_array, headers, coord_names):
2725 Given the sample array, headers for the values,
and the names
2726 of the desired coordinates, construct a parameter sample
2731 if not (len(sample_array) == len(self.
_headers)):
2732 print(
"Header length = ", len(self.
_headers))
2733 print(
"Sample length = ", len(sample_array))
2734 raise RuntimeError(
"parameter and sample lengths do not agree")
2740 Return the element with the corresponding name.
2747 raise KeyError(
"key not found in posterior sample: %s"%key)
2751 Return the coordinates for the parameter sample.
2761 Return analytic likelihood values.
2764 def __init__(self, covariance_matrix_files, mean_vector_files):
2766 Prepare analytic likelihood for the given parameters.
2769 if isinstance(covariance_matrix_files, str):
2770 covariance_matrix_files = [covariance_matrix_files]
2771 if isinstance(mean_vector_files, str):
2772 mean_vector_files = [mean_vector_files]
2774 covarianceMatrices = [np.loadtxt(csvFile, delimiter=
',')
for csvFile
in covariance_matrix_files]
2775 num_matrices = len(covarianceMatrices)
2777 if num_matrices != len(mean_vector_files):
2778 raise RuntimeError(
'Must give a mean vector list for every covariance matrix')
2780 param_line = open(mean_vector_files[0]).readline()
2781 self.
_params = [param.strip()
for param
in param_line.split(
',')]
2783 converter=
lambda x: eval(x.replace(
'pi',
'%.32f'%pi_constant))
2785 for i
in range(num_matrices):
2786 CM = covarianceMatrices[i]
2787 vecFile = mean_vector_files[i]
2789 param_line = open(vecFile).readline()
2790 params = [param.strip()
for param
in param_line.split(
',')]
2791 if set(params)!=set(self.
_params):
2792 raise RuntimeError(
'Parameters do not agree between mean vector files.')
2794 sigmas = dict(zip(params,np.sqrt(CM.diagonal())))
2795 colNums = range(len(params))
2796 converters = dict(zip(colNums,[converter
for i
in colNums]))
2797 meanVectors = np.loadtxt(vecFile, delimiter=
',', skiprows=1, converters=converters)
2799 for vec
in meanVectors:
2800 means = dict(zip(params,vec))
2801 mode = [(param, stats.norm(loc=means[param],scale=sigmas[param]))
for param
in params]
2802 self.
_modes.append(dict(mode))
2804 means = dict(zip(params,meanVectors))
2805 mode = [(param, stats.norm(loc=means[param],scale=sigmas[param]))
for param
in params]
2806 self.
_modes.append(dict(mode))
2809 def pdf(self, param):
2811 Return PDF function for parameter.
2818 def cdf(self, param):
2820 Return PDF function for parameter.
2830 Return list of parameter names described by analytic likelihood function.
2842 A base class for representing web content using ElementTree .
2846 self.
_html=Element(tag)
2848 for attribname,attribvalue
in attrib.items():
2849 self.
_html.attrib[attribname]=attribvalue
2851 parent.append(self.
_html)
2855 Return a pretty-printed XML string of the htmlPage.
2858 rough_string = tostring(elem)
2859 reparsed = minidom.parseString(rough_string)
2860 return reparsed.toprettyxml(indent=
" ")
2871 def p(self,pstring):
2911 def a(self,url,linktext):
2913 Ea.attrib[
'href']=url
2920 if idtable
is not None:
2923 Etab=Element(
'table',args)
2930 Insert row in table tab.
2931 If given, label used
as id
for the table tag
2935 if label
is not None:
2936 Etr.attrib[
'id']=label
2940 def insert_td(self,row,td,label=None,legend=None):
2942 Insert cell td into row row.
2943 Sets id to label, if given
2952 td=minidom.parseString(td)
2953 td=td.toprettyxml(indent=
" ")
2955 if label
is not None:
2956 Etd.attrib[
'id']=label
2957 if legend
is not None:
2958 legend.a(
'#%s'%label,
'%s'%label)
2970 A concrete class for generating an XHTML(1) document. Inherits
from htmlChunk.
2972 def __init__(self,title=None,css=None,javascript=None,toc=False):
2973 htmlChunk.__init__(self,
'html',attrib={
'xmlns':
"http://www.w3.org/1999/xhtml"})
2974 self.
doctype_str=
'<!DOCTYPE html PUBLIC "-//W3C//DTD XHTML 1.0 Transitional//EN" "http://www.w3.org/TR/xhtml1/DTD/xhtml1-transitional.dtd">'
2977 Etitle=SubElement(self.
_head,
'title')
2981 if title
is not None:
2982 Etitle.text=
str(title)
2985 if javascript
is not None:
2987 self.
_jscript.attrib[
'type']=
"text/javascript"
2990 self.
_css=SubElement(self.
_head,
'style')
2991 self.
_css.attrib[
'type']=
"text/css"
3000 if legend
is not None:
3001 legend.a(
'#%s'%section_name,
'%s'%section_name)
3007 Create a section embedded into a table that can be collapsed with a button
3009 newSection=htmlCollapseSection(section_name,table_id=innertable_id,start_closed=start_closed)
3011 if legend
is not None:
3012 legend.a(
'#%s'%section_name,
'%s'%section_name)
3018 Create a section which is not appended to the body of html, but to the parent Element
3020 newSection=htmlSection(section_name,htmlElement=parent,blank=True)
3021 parent.append(newSection._html)
3036 Represents a block of html fitting within a htmlPage. Inherits from htmlChunk.
3038 def __init__(self,section_name,htmlElement=None,blank=False):
3040 htmlChunk.__init__(self,
'div',attrib={
'class':
'ppsection',
'id':section_name},parent=htmlElement)
3042 htmlChunk.__init__(self,
'div',attrib={
'style':
'"color:#000000"',
'id':section_name},parent=htmlElement)
3043 self.
h2(section_name)
3048 Represents a block of html fitting within a htmlPage. Inherits from htmlChunk.
3051 def __init__(self,section_name,htmlElement=None,table_id=None,start_closed=True):
3052 htmlChunk.__init__(self,
'div',attrib={
'class':
'ppsection',
'id':section_name},parent=htmlElement)
3054 if table_id
is None:
3055 table_id=random.randint(1,10000000)
3060 k=random.randint(1,10000000)
3062 st=
'<table border="0" align="center" cellpadding="5" cellspacing="0"><tr bgcolor="#4682B4" height="50"><td width="5%%"><font size="4" face="tahoma" color="white"><a href="#"> Top</a></font></td><td width="45%%"><font size="4" face="tahoma" color="white"><strong>%s</strong></font></td><td bgcolor="#4682B4" align="center" width="50%%"><input id="lnk%s" type="button" value="[+] Expand" onclick="toggle_visibility(\'%s\',\'lnk%s\');"></input></td></tr><tr><td colspan="7">'%(self.
_html.attrib[
'id'],k,self.
table_id,k)
3063 string=string.replace(
'table ',
'table style="display: none" ')
3065 st=
'<table border="0" align="center" cellpadding="5" cellspacing="0"><tr bgcolor="#4682B4" height="50"><td width="5%%"><font size="4" face="tahoma" color="white"><a href="#"> Top</a></font></td><td width="45%%"><font size="4" face="tahoma" color="white"><strong>%s</strong></font></td><td bgcolor="#4682B4" align="center" width="50%%"><input id="lnk%s" type="button" value="[-] Collapse" onclick="toggle_visibility(\'%s\',\'lnk%s\');"></input></td></tr><tr><td colspan="7">'%(self.
_html.attrib[
'id'],k,self.
table_id,k)
3066 string=string.replace(
'table ',
'table style="display: table" ')
3068 st+=
'</td></tr></table>'
3069 htmlChunk.write(self,st)
3076def _calculate_confidence_levels(hist, points, injBin, NSamples):
3078 Returns (injectionconf, toppoints), where injectionconf is the
3079 confidence level of the injection, contained
in the injBin
and
3080 toppoints
is a list of (pointx, pointy, ptindex, frac),
with
3081 pointx
and pointy the (x,y) coordinates of the corresponding
3082 element of the points array, ptindex the index of the point
in the
3083 array,
and frac the cumulative fraction of points
with larger
3084 posterior probability.
3086 The hist argument should be a one-dimensional array that contains
3087 counts of sample points
in each bin.
3089 The points argument should be a 2-D array storing the sky location
3090 associated
with each bin; the first index runs
from 0 to NBins -
3091 1,
while the second index runs
from 0 to 1.
3093 The injBin argument gives the bin index
in which the injection
is
3096 The NSamples argument
is used to normalize the histogram counts
3097 into fractional probability.
3100 histIndices=np.argsort(hist)[::-1]
3105 for i
in histIndices:
3106 frac+=float(hist[i])/float(NSamples)
3107 toppoints.append((points[i,0], points[i,1], i, frac))
3110 print(
'Injection found at confidence level %g'%injConf)
3112 return (injConf, toppoints)
def _greedy_bin(greedyHist, greedyPoints, injection_bin_index, bin_size, Nsamples, confidence_levels):
    """
    An internal function representing the common, dimensionally-independent
    part of the greedy binning algorithms.

    Returns (toppoints, injectionconfidence, reses, injection_area) where
    reses maps each requested confidence level to the greedy interval/area
    (number of bins times bin_size) needed to reach it.
    """
    # Rank bins by probability and accumulate the cumulative fraction.
    (injectionconfidence, toppoints) = _calculate_confidence_levels(
        greedyHist, greedyPoints, injection_bin_index, Nsamples)

    # Determine the interval/area contained within each confidence level.
    confidence_levels.sort()
    reses = {}
    toppoints = np.array(toppoints)
    for printcl in confidence_levels:
        # First bin index at which the cumulative fraction reaches printcl.
        nBins = np.searchsorted(toppoints[:, 3], printcl) + 1
        if nBins >= len(toppoints):
            nBins = len(toppoints) - 1
        reses[printcl] = nBins * bin_size

    # Area/interval enclosed up to the injection's bin, if it was found.
    injection_area = None
    if injection_bin_index and injectionconfidence:
        i = list(np.nonzero(np.asarray(toppoints)[:, 2] == injection_bin_index))[0]
        injection_area = bin_size * (i + 1)

    return toppoints, injectionconfidence, reses, injection_area
3149 return - (cos(pi_constant/2. - bounds[0][1])-cos(pi_constant/2. - bounds[1][1]))*(bounds[1][0] - bounds[0][0])
def random_split(items, fraction):
    """
    Randomly split `items` into two lists of sizes
    int(len(items)*fraction) and the remainder.

    @param items: a list of samples; NOTE it is shuffled IN PLACE.
    @param fraction: fraction (0-1) of items to place in the first list.

    Returns (first_part, second_part).
    """
    size = int(len(items) * fraction)
    # random.shuffle mutates the caller's list; kept for backward
    # compatibility with existing callers.
    random.shuffle(items)
    return items[:size], items[size:]
3157 if tree._left
is None:
3159 elif coordinates[tree._splitDim] < tree._splitValue:
3171 confidence_levels.sort()
3173 class Harvester(list):
3179 def __call__(self,tree):
3180 number_density=float(len(tree.objects()))/float(tree.volume())
3181 self.append([number_density,tree.volume(),tree.bounds()])
3182 self.unrho+=number_density
3184 def close_ranks(self):
3186 for i
in range(len(self)):
3187 self[i][0]/=self.unrho
3189 return sorted(self,key=itemgetter(0))
3194 samples,header=posterior.samples()
3195 header=header.split()
3196 coord_names=[
"ra",
"dec",
"dist"]
3197 coordinatized_samples=[PosteriorSample(row, header, coord_names)
for row
in samples]
3198 tree=KDTree(coordinatized_samples)
3202 tree.operate(a,h,boxing=samples_per_bin)
3210 confidence_intervals={}
3211 for rho,vol,bounds
in b:
3215 if acc_rho>confidence_levels[cl_idx]:
3216 confidence_intervals[acc_rho]=acc_vol
3218 if cl_idx==len(confidence_levels):
3221 return confidence_intervals
3225 takes samples and applies a KDTree to them to
return confidence levels
3226 returns confidence_intervals - dictionary of user_provided_CL:calculated_area
3227 b - ordered list of KD leaves
3228 injInfo -
if injection values provided then returns
3229 [Bounds_of_inj_kd_leaf ,number_samples_in_box, weight_of_box,injection_CL ,injection_CL_area]
3230 Not quite sure that the repeated samples case
is fixed, posibility of infinite loop.
3232 confidence_levels.sort()
3233 from math
import cos, pi
3234 class Harvester(list):
3236 when called by kdtree.operate will be used to calculate the density of each bin (sky area)
3242 def __call__(self,tree):
3244 area = - (cos(pi/2. - tree.bounds()[0][1])-cos(pi/2. - tree.bounds()[1][1]))*(tree.bounds()[1][0] - tree.bounds()[0][0])
3245 number_density=float(len(tree.objects()))/float(area)
3246 self.append([number_density,len(tree.objects()),area,tree.bounds()])
3247 self.unrho+=number_density
3249 def close_ranks(self):
3251 for i
in range(len(self)):
3252 self[i][0]/=self.unrho
3254 return sorted(self,key=itemgetter(0))
3259 peparser=PEOutputParser(
'common')
3261 samples,header=posterior.samples()
3262 header=header.split()
3263 coord_names=[
"ra",
"dec"]
3264 initial_dimensions = [[0.,-pi/2.],[2.*pi, pi/2.]]
3265 coordinatized_samples=[PosteriorSample(row, header, coord_names)
for row
in samples]
3266 tree=KDTreeVolume(coordinatized_samples,initial_dimensions)
3269 tree.operate(a,h,boxing=samples_per_bin)
3270 totalSamples = len(tree.objects())
3275 samplecounter += entry[1]
3276 entry[1] = float(samplecounter)/float(totalSamples)
3283 if posterior[
'ra'].injval
is not None and posterior[
'dec'].injval
is not None:
3284 injBound,injNum,injWeight = tree.search([posterior[
'ra'].injval,posterior[
'dec'].injval],boxing = samples_per_bin)
3285 injInfo = [injBound,injNum,injWeight]
3286 inj_area = - (cos(pi/2. - injBound[0][1])-cos(pi/2. - injBound[1][1]))*(injBound[1][0] - injBound[0][0])
3287 inj_number_density=float(injNum)/float(inj_area)
3288 inj_rho = inj_number_density / a.unrho
3292 inj_number_density=
None
3296 confidence_intervals={}
3297 for rho,confidence_level,vol,bounds
in b:
3300 if confidence_level>confidence_levels[cl_idx]:
3301 print(
str(confidence_level))
3303 confidence_intervals[confidence_levels[cl_idx]]=acc_vol
3305 if cl_idx==len(confidence_levels):
3309 for rho,sample_number,vol,bounds
in b:
3311 print(
'total area: ' +
str(acc_vol))
3314 inj_confidence =
None
3315 inj_confidence_area =
None
3316 if inj_rho
is not None:
3318 for rho,confidence_level,vol,bounds
in b:
3321 inj_confidence = confidence_level
3322 inj_confidence_area = acc_vol
3323 injInfo.append(inj_confidence)
3324 injInfo.append(inj_confidence_area)
3325 print(
'inj ' +
str(vol))
3327 return confidence_intervals, b, injInfo
3329def kdtree_bin(posterior,coord_names,confidence_levels,initial_boundingbox = None,samples_per_bin = 10):
3331 takes samples and applies a KDTree to them to
return confidence levels
3332 returns confidence_intervals - dictionary of user_provided_CL:calculated_volume
3333 b - ordered list of KD leaves
3334 initial_boundingbox - list of lists [upperleft_coords,lowerright_coords]
3335 injInfo -
if injection values provided then returns
3336 [Bounds_of_inj_kd_leaf ,number_samples_in_box, weight_of_box,injection_CL ,injection_CL_volume]
3337 Not quite sure that the repeated samples case
is fixed, posibility of infinite loop.
3339 confidence_levels.sort()
3340 print(confidence_levels)
3341 class Harvester(list):
3343 when called by kdtree.operate will be used to calculate the density of each bin
3349 def __call__(self,tree):
3350 number_density=float(len(tree.objects()))/float(tree.volume())
3351 self.append([number_density,len(tree.objects()),tree.volume(),tree.bounds()])
3352 self.unrho+=number_density
3354 def close_ranks(self):
3356 for i
in range(len(self)):
3357 self[i][0]/=self.unrho
3359 return sorted(self,key=itemgetter(0))
3364 peparser=PEOutputParser(
'common')
3366 samples,header=posterior.samples()
3367 header=header.split()
3368 coordinatized_samples=[PosteriorSample(row, header, coord_names)
for row
in samples]
3371 if initial_boundingbox
is None:
3372 low=coordinatized_samples[0].coord()
3373 high=coordinatized_samples[0].coord()
3374 for obj
in coordinatized_samples[1:]:
3375 low=np.minimum(low,obj.coord())
3376 high=np.maximum(high,obj.coord())
3377 initial_boundingbox = [low,high]
3379 tree=KDTreeVolume(coordinatized_samples,initial_boundingbox)
3382 tree.operate(a,h,boxing=samples_per_bin)
3386 totalSamples = len(tree.objects())
3389 samplecounter += entry[1]
3390 entry[1] = float(samplecounter)/float(totalSamples)
3397 def checkNone(listoParams):
3398 for param
in listoParams:
3399 if posterior[param].injval
is None:
3404 if checkNone(coord_names):
3405 injBound,injNum,injWeight = tree.search([posterior[x].injval
for x
in coord_names],boxing = samples_per_bin)
3406 injInfo = [injBound,injNum,injWeight]
3411 for aCoord,bCoord
in zip(low,high):
3412 inj_volume = inj_volume*(bCoord - aCoord)
3413 inj_number_density=float(injNum)/float(inj_volume)
3414 inj_rho = inj_number_density / a.unrho
3415 print(injNum,inj_volume,inj_number_density,a.unrho,injBound)
3419 inj_number_density=
None
3423 confidence_intervals={}
3424 for rho,sample_number,vol,bounds
in b:
3427 if sample_number>confidence_levels[cl_idx]:
3428 confidence_intervals[confidence_levels[cl_idx]]=(acc_vol,sample_number)
3430 if cl_idx==len(confidence_levels):
3434 for rho,sample_number,vol,bounds
in b:
3438 inj_confidence =
None
3439 inj_confidence_area =
None
3440 if inj_rho
is not None:
3441 print(
'calculating cl')
3443 for rho,confidence_level,vol,bounds
in b:
3446 inj_confidence = confidence_level
3447 inj_confidence_area = acc_vol
3448 injInfo.append(inj_confidence)
3449 injInfo.append(inj_confidence_area)
3452 return confidence_intervals, b, initial_boundingbox,injInfo
3454def kdtree_bin2Step(posterior,coord_names,confidence_levels,initial_boundingbox = None,samples_per_bin = 10,injCoords = None,alternate = False, fraction = 0.5, skyCoords=False):
3456 input: posterior class instance, list of confidence
levels, optional choice of inital parameter space,
samples per box in kdtree
3457 note initial_boundingbox
is [[lowerbound of each param][upper bound of each param]],
if not specified will just take limits of samples
3458 fraction
is proportion of samples used
for making the tree structure.
3459 returns: confidence_intervals, sorted list of kd objects, initial_boundingbox, injInfo
3460 where injInfo
is [bounding box injection
is found within, samples
in said box, weighting of box (
in case of repeated samples),inj_confidence, inj_confidence_area]
3462 confidence_levels.sort()
3464 samples,header=posterior.samples()
3465 numberSamples = len(samples)
3466 if alternate ==
False:
3467 samplesStructure, samplesFill =
random_split(samples,fraction)
3469 samplesStructure = samples[:
int(numberSamples*fraction)]
3470 samplesFill = samples[
int(numberSamples*fraction):]
3471 samplesFillLen = len(samplesFill)
3473 header=header.split()
3474 coordinatized_samples=[
PosteriorSample(row, header, coord_names)
for row
in samplesStructure]
3476 if skyCoords ==
True:
3477 initial_boundingbox = [[0,-pi_constant/2.],[2*pi_constant,pi_constant/2.]]
3478 if initial_boundingbox
is None:
3479 low=coordinatized_samples[0].coord()
3480 high=coordinatized_samples[0].coord()
3481 for obj
in coordinatized_samples[1:]:
3482 low=np.minimum(low,obj.coord())
3483 high=np.maximum(high,obj.coord())
3484 initial_boundingbox = [low,high]
3485 tree=
KDTreeVolume(coordinatized_samples,initial_boundingbox)
3486 tree2fill = tree.fillNewTree(boxing=samples_per_bin, isArea = skyCoords)
3489 for name
in coord_names:
3490 columns.append(header.index(name))
3492 for sample
in samplesFill:
3494 for column
in columns:
3495 tempSample.append(sample[column])
3498 def getValues(tree,listing):
3499 if tree._left
is None:
3500 listing.append([tree.bounds(),tree._importance,tree._samples,tree._volume])
3502 getValues(tree._left,listing)
3503 getValues(tree._right,listing)
3506 getValues(tree2fill,listLeaves)
3509 for cl
in confidence_levels:
3510 clSamples.append(samplesFillLen*cl)
3512 sortedLeavesList = sorted(listLeaves, key=
lambda importance: importance[1])
3513 sortedLeavesList.reverse()
3514 runningTotalSamples = 0
3515 for i
in range(len(sortedLeavesList)):
3516 runningTotalSamples += sortedLeavesList[i][2]
3517 sortedLeavesList[i].append(float(runningTotalSamples)/samplesFillLen,)
3523 lencl = len(clSamples)
3525 confidence_intervals={}
3526 interpConfAreas = {}
3528 for leaf
in sortedLeavesList:
3529 countSamples += leaf[2]
3532 if level < lencl
and countSamples >= clSamples[level]:
3533 confidence_intervals[confidence_levels[level]]=(volume,float(countSamples)/samplesFillLen)
3534 interpConfAreas[confidence_levels[level]] = volume-leaf[3]*(countSamples-clSamples[level])/leaf[2]
3537 if injCoords
is not None:
3538 injBound,injNum,injImportance = tree2fill.search(injCoords)
3539 injInfo = [injBound,injNum,injImportance]
3545 inj_confidence =
None
3546 inj_confidence_area =
None
3547 if injInfo
is not None:
3550 for leaf
in sortedLeavesList:
3553 if leaf[1] <= injImportance:
3554 inj_confidence = float(acc_cl)/samplesFillLen
3555 inj_confidence_area = acc_vol
3556 injInfo.append(inj_confidence)
3557 injInfo.append(inj_confidence_area)
3560 return sortedLeavesList, interpConfAreas, injInfo
3562 def checkNone(listoParams):
3563 for param
in listoParams:
3564 if posterior[param].injval
is None:
3570 Determine the 2-parameter Bayesian Confidence Intervals using a greedy
3573 @param posterior: an instance of the Posterior
class.
3575 @param greedy2Params: a dict - {param1Name:param1binSize,param2Name:param2binSize} .
3577 @param confidence_levels: A list of floats of the required confidence intervals [(0-1)].
3581 par1_name,par2_name=greedy2Params.keys()
3584 par1pos=posterior[par1_name.lower()].samples
3585 par2pos=posterior[par2_name.lower()].samples
3588 par1_bin=greedy2Params[par1_name]
3589 par2_bin=greedy2Params[par2_name]
3592 par1_injvalue=posterior[par1_name.lower()].injval
3593 par2_injvalue=posterior[par2_name.lower()].injval
3596 par1pos_min=min(par1pos)[0]
3597 par2pos_min=min(par2pos)[0]
3599 par1pos_max=
max(par1pos)[0]
3600 par2pos_max=
max(par2pos)[0]
3602 par1pos_Nbins=
int(ceil((par1pos_max - par1pos_min)/par1_bin))+1
3604 par2pos_Nbins=
int(ceil((par2pos_max - par2pos_min)/par2_bin))+1
3606 greedyHist = np.zeros(par1pos_Nbins*par2pos_Nbins,dtype=
'i8')
3607 greedyPoints = np.zeros((par1pos_Nbins*par2pos_Nbins,2))
3610 par1_point=par1pos_min
3611 par2_point=par2pos_min
3612 for i
in range(par2pos_Nbins):
3614 par1_point=par1pos_min
3615 for j
in range(par1pos_Nbins):
3617 greedyPoints[j+par1pos_Nbins*i,0]=par1_point
3618 greedyPoints[j+par1pos_Nbins*i,1]=par2_point
3619 par1_point+=par1_bin
3620 par2_point+=par2_bin
3625 if par1_injvalue
is not None and par2_injvalue
is not None:
3627 par1_binNumber=
int(floor((par1_injvalue-par1pos_min)/par1_bin))
3628 par2_binNumber=
int(floor((par2_injvalue-par2pos_min)/par2_bin))
3630 injbin=
int(par1_binNumber+par2_binNumber*par1pos_Nbins)
3631 elif par1_injvalue
is None and par2_injvalue
is not None:
3632 print(
"Injection value not found for %s!"%par1_name)
3634 elif par1_injvalue
is not None and par2_injvalue
is None:
3635 print(
"Injection value not found for %s!"%par2_name)
3638 for par1_samp,par2_samp
in zip(par1pos,par2pos):
3639 par1_samp=par1_samp[0]
3640 par2_samp=par2_samp[0]
3641 par1_binNumber=
int(floor((par1_samp-par1pos_min)/par1_bin))
3642 par2_binNumber=
int(floor((par2_samp-par2pos_min)/par2_bin))
3644 greedyHist[par1_binNumber+par2_binNumber*par1pos_Nbins]+=1
3646 raise RuntimeError(
"Problem binning samples: %i,%i,%i,%i,%i,%f,%f,%f,%f,%f,%f .")%(par1_binNumber,par2_binNumber,par1pos_Nbins,par2pos_Nbins,par1_binNumber+par2_binNumber*par1pos_Nbins,par1_samp,par1pos_min,par1_bin,par1_samp,par2pos_min,par2_bin)
3648 toppoints,injection_cl,reses,injection_area=\
3653 float(par1_bin*par2_bin),
3658 return toppoints,injection_cl,reses,injection_area
3662 Utility function to convert longitude,latitude on a unit sphere to
3663 cartesian co-ordinates.
3666 x=np.cos(lat)*np.cos(long)
3667 y=np.cos(lat)*np.sin(long)
3669 return np.array([x,y,z])
 3674 Utility function to convert r,theta,phi to cartesian co-ordinates.
3676 x = r*np.sin(theta)*np.cos(phi)
3677 y = r*np.sin(theta)*np.sin(phi)
3684 Utility function to convert cartesian coords to r,theta,phi.
3686 r = np.sqrt(x*x + y*y + z*z)
3687 theta = np.arccos(z/r)
3688 phi = np.fmod(2*pi_constant + np.arctan2(y,x), 2*pi_constant)
3695 """Plots a sky map from a healpix map, optionally including an
3696 injected position. This is a temporary map to display before
3697 ligo.skymap utility
is used to generated a smoother one.
3699 :param hpmap: An array representing a healpix map (
in nested
3700 ordering
if ``nest =
True``).
3702 :param outdir: The output directory.
3704 :param inj: If
not ``
None``, then ``[ra, dec]`` of the injection
3705 associated
with the posterior map.
3707 :param nest: Flag indicating the pixel ordering
in healpix.
3711 fig = plt.figure(frameon=False, figsize=(8,6))
3712 hp.mollview(hpmap, nest=nest, min=0, max=np.max(hpmap), cmap=
'Greys', coord=
'E', fig=fig.number, title=
'Histogrammed skymap' )
3713 plt.grid(
True,color=
'g',figure=fig)
3716 theta = np.pi/2.0 - inj[1]
3717 hp.projplot(theta, inj[0],
'*', markerfacecolor=
'white', markeredgecolor=
'black', markersize=10)
3719 plt.savefig(os.path.join(outdir,
'skymap.png'))
3724 """Returns the area (in square degrees) for each confidence level with
3725 a greedy binning algorithm for the given healpix map.
3729 hpmap = hpmap / np.sum(hpmap)
3731 hpmap = np.sort(hpmap)[::-1]
3732 cum_hpmap = np.cumsum(hpmap)
3734 pixarea = hp.nside2pixarea(hp.npix2nside(hpmap.shape[0]))
3735 pixarea = pixarea*(180.0/np.pi)**2
3739 npix = np.sum(cum_hpmap < cl)
3740 areas.append(npix*pixarea)
3742 return np.array(areas)
3745 """Returns the greedy p-value estimate for the given injection.
3749 nside = hp.npix2nside(hpmap.shape[0])
3750 hpmap = hpmap / np.sum(hpmap)
3752 injpix = hp.ang2pix(nside, np.pi/2.0-inj[1], inj[0], nest=nest)
3753 injvalue = hpmap[injpix]
3755 return np.sum(hpmap[hpmap >= injvalue])
3761 Utility function for converting mchirp,eta to component masses. The
3762 masses are defined so that m1>m2. The rvalue
is a tuple (m1,m2).
3764 root = np.sqrt(0.25-eta)
3765 fraction = (0.5+root) / (0.5-root)
3766 invfraction = 1/fraction
3768 m2= mc * np.power((1+fraction),0.2) / np.power(fraction,0.6)
3770 m1= mc* np.power(1+invfraction,0.2) / np.power(invfraction,0.6)
3777 Utility function for converting mchirp,q to component masses. The
3778 masses are defined so that m1>m2. The rvalue
is a tuple (m1,m2).
3780 factor = mc * np.power(1+q, 1.0/5.0);
3781 m1 = factor * np.power(q, -3.0/5.0);
3782 m2 = factor * np.power(q, 2.0/5.0);
3789 Utility function for converting q to eta. The
3792 eta = q/((1+q)*(1+q))
3793 return np.clip(eta,0,0.25)
3799 Utility function for converting mchirp,eta to new mass ratio q (m2/m1).
3801 m1,m2 = mc2ms(mc,eta)
def ang_dist(long1, lat1, long2, lat2):
    """
    Find the angular separation of (long1,lat1) and (long2,lat2), which are
    specified in radians.

    @param long1: longitude of the first point [rad]
    @param lat1: latitude of the first point [rad]
    @param long2: longitude of the second point [rad]
    @param lat2: latitude of the second point [rad]
    @return: angular separation of the two points [rad]
    """
    # Convert each (long, lat) to a unit vector on the sphere.
    x1 = np.cos(lat1) * np.cos(long1)
    y1 = np.cos(lat1) * np.sin(long1)
    z1 = np.sin(lat1)

    x2 = np.cos(lat2) * np.cos(long2)
    y2 = np.cos(lat2) * np.sin(long2)
    z2 = np.sin(lat2)

    # np.acos only exists in NumPy >= 2.0; np.arccos is the portable name.
    # Clip the dot product to [-1, 1] so floating-point round-off for
    # (nearly) coincident or antipodal points cannot push it outside the
    # arccos domain and produce nan.
    sep = np.arccos(np.clip(x1 * x2 + y1 * y2 + z1 * z2, -1.0, 1.0))

    return sep
3826 Calculate dot products between vectors in rows of numpy arrays.
3829 product = (vec1*vec2).sum()
3831 product = (vec1*vec2).sum(axis=1).reshape(-1,1)
3838 Find angles between vectors in rows of numpy arrays.
3840 vec1_mag = np.sqrt(array_dot(vec1, vec1))
3841 vec2_mag = np.sqrt(array_dot(vec2, vec2))
3842 return np.arccos(
array_dot(vec1, vec2)/(vec1_mag*vec2_mag))
3848 Find polar angles of vectors in rows of a numpy array.
3853 z = vec[:,2].reshape(-1,1)
3855 return np.arccos(z/norm)
3861 Compute general rotation matrices for a given angles
and direction vectors.
3863 cosa = np.cos(angle)
3864 sina = np.sin(angle)
3865 direction /= np.sqrt(array_dot(direction,direction))
3869 R = np.array( [np.diag([i,i,i])
for i
in cosa.flat] )
3870 R += np.array( [np.outer(direction[i],direction[i])*(1.0-cosa[i])
for i
in range(nSamps)] )
3871 R += np.array( [np.array( [[ 0.0, -direction[i,2], direction[i,1]],
3872 [ direction[i,2], 0.0, -direction[i,0]],
3873 [-direction[i,1], direction[i,0], 0.0 ]] ) * sina[i]
for i
in range(nSamps)] )
3876 R = np.diag([cosa,cosa,cosa])
3877 R += np.outer(direction,direction) * (1.0 - cosa)
3878 R += np.array( [[ 0.0, -direction[2], direction[1]],
3879 [ direction[2], 0.0, -direction[0]],
3880 [-direction[1], direction[0], 0.0 ]] ) * sina
3886 tmp1 = vx*np.cos(angle) - vy*np.sin(angle);
3887 tmp2 = vx*np.sin(angle) + vy*np.cos(angle);
3888 return np.asarray([tmp1,tmp2,vz])
3892 tmp1 = vx*np.cos(angle) + vz*np.sin(angle);
3893 tmp2 = - vx*np.sin(angle) + vz*np.cos(angle);
3894 return np.asarray([tmp1,vy,tmp2])
3898 Calculate orbital angular momentum vector.
3899 Note: The units of Lmag are different than what used in lalsimulation.
3900 Mc must be called
in units of Msun here.
3902 Note that
if one wants to build J=L+S1+S2
with L returned by this function, S1
and S2
3903 must
not get the Msun^2 factor.
3905 eta = m1*m2/( (m1+m2)*(m1+m2) )
3907 Lx, Ly, Lz = sph2cart(Lmag, inclination, 0.0)
3908 return np.hstack((Lx,Ly,Lz))
3912 v0 = np.power((m1+m2) *pi_constant * lal.MTSUN_SI * fref, 1.0/3.0)
3914 PNFirst = (((m1+m2)**2)*eta)/v0
3915 PNSecond = 1+ (v0**2) * (3.0/2.0 +eta/6.0)
3916 Lmag= PNFirst*PNSecond
3921 Calculate BH angular momentum vector.
3923 Sx, Sy, Sz = sph2cart(m**2 * a, theta, phi)
3924 return np.hstack((Sx,Sy,Sz))
3930 Calculate best tidal parameters [Eqs. (5) and (6)
in Wade et al. PRD 89, 103012 (2014)]
3933 lambdap = lambda1 + lambda2
3934 lambdam = lambda1 - lambda2
3938 raise ValueError(
"q > 1, while this function requires q <= 1.")
3940 dmbym = (1. - q)/(1. + q)
3944 lam_tilde = (8./13.)*((1.+7.*eta-31.*eta*eta)*lambdap + dmbym*(1.+9.*eta-11.*eta*eta)*lambdam)
3945 dlam_tilde = (1./2.)*(dmbym*(1.-13272.*eta/1319.+8944.*eta*eta/1319.)*lambdap + (1.-15910.*eta/1319.+32850.*eta*eta/1319.+3380.*eta*eta*eta/1319.)*lambdam)
3946 return lam_tilde, dlam_tilde
3948def spin_angles(fref,mc,eta,incl,a1,theta1,phi1,a2=None,theta2=None,phi2=None):
3950 Calculate physical spin angles.
3952 singleSpin = None in (a2,theta2,phi2)
3953 m1, m2 =
mc2ms(mc,eta)
3968 return tilt1, tilt2, theta_jn, beta
3972 Calculate the magnitude of the effective precessing spin
3973 following convention from Phys. Rev. D 91, 024043 -- arXiv:1408.1810
3974 note: the paper uses naming convention where m1 < m2
3975 (
and similar
for associated spin parameters)
and q > 1
3978 A1 = 2. + (3.*q_inv/2.)
3979 A2 = 2. + 3./(2.*q_inv)
3980 S1_perp = a1*np.sin(tilt1)*m1*m1
3981 S2_perp = a2*np.sin(tilt2)*m2*m2
3982 Sp = np.maximum(A1*S2_perp, A2*S1_perp)
3983 chi_p = Sp/(A2*m1*m1)
3988 Calculate the redshift from the luminosity distance measurement using the
3989 Cosmology Calculator provided
in LAL.
3990 By default assuming cosmological parameters
from arXiv:1502.01589 -
'Planck 2015 results. XIII. Cosmological parameters'
3991 Using parameters
from table 4, column
'TT+lowP+lensing+ext'
3992 This corresponds to Omega_M = 0.3065, Omega_Lambda = 0.6935, H_0 = 67.90 km s^-1 Mpc^-1
3993 Returns an array of redshifts
3995 def find_z_root(z,dl,omega):
3996 return dl - lal.LuminosityDistance(omega,z)
3998 omega = lal.CreateCosmologicalParameters(h,om,ol,w0,0.0,0.0)
3999 if isinstance(distance,float):
4000 z = np.array([newton(find_z_root,np.random.uniform(0.0,2.0),args = (distance,omega))])
4002 z = np.array([newton(find_z_root,np.random.uniform(0.0,2.0),args = (d,omega))
for d
in distance[:,0]])
4003 return z.reshape(z.shape[0],1)
4007 Calculate source mass parameter for mass m
as:
4008 m_source = m / (1.0 + z)
4011 return mass / (1.0 + redshift)
4016 Calculate D_alpha integral; multiplicative factor put later
4017 D_alpha = integral{ ((1+z')^(alpha-2))/sqrt(Omega_m*(1+z')^3 +Omega_lambda) dz
'} # eq.15 of arxiv 1110.2720
4019 omega = lal.CreateCosmologicalParameters(0.6790,0.3065,0.6935,-1.0,0.0,0.0)
4023 return (1.0+redshift)**(nonGR_alpha-2.0)/(np.sqrt(omega_m*(1.0+redshift)**3.0 + omega_l))
4027 D_alpha = ((1+z)^(1-alpha))/H_0 * D_alpha
4028 D_alpha calculated
from integrand
in above function
4030 omega = lal.CreateCosmologicalParameters(0.6790,0.3065,0.6935,-1.0,0.0,0.0)
4031 H0 = omega.h*lal.H0FAC_SI
4032 dist = integrate.quad(integrand_distance, 0, redshift ,args=(nonGR_alpha))[0]
4033 dist *= (1.0 + redshift)**(1.0 - nonGR_alpha)
4036 return dist*lal.C_SI
def lambda_a(redshift, nonGR_alpha, lambda_eff, distance):
    """
    Convert the effective wavelength-like parameter to lambda_A:
    lambda_A = lambda_eff*(D_alpha/D_L)^(1/(2-alpha))*(1/(1+z)^((1-alpha)/(2-alpha)))

    @param redshift: source redshift(s)
    @param nonGR_alpha: exponent alpha of the non-GR dispersion relation
    @param lambda_eff: effective wavelength-like parameter
    @param distance: luminosity distance in Mpc
    @return: lambda_A in the same units as lambda_eff
    """
    # Vectorize the (scalar) distance measure so arrays of redshifts work.
    measure_fn = np.vectorize(DistanceMeasure)
    dist_measure = measure_fn(redshift, nonGR_alpha)
    # Luminosity distance, converted from Mpc to SI (metres).
    lum_dist_si = distance * lal.PC_SI * 1e6
    scaling = dist_measure / (lum_dist_si * (1.0 + redshift) ** (1.0 - nonGR_alpha))
    return lambda_eff * scaling ** (1. / (2.0 - nonGR_alpha))
4050 Converting to Lorentz violating parameter "A" in dispersion relation
from lambda_A:
4051 A = (lambda_A/h)^(alpha-2)
4053 hPlanck = 4.13567e-15
4054 ampFunc = np.vectorize(lambda_a)
4055 lambdaA = ampFunc(redshift, nonGR_alpha, lambda_eff, distance)/lal.C_SI
4056 return (lambdaA/hPlanck)**(nonGR_alpha-2.0)
4059def physical2radiationFrame(theta_jn, phi_jl, tilt1, tilt2, phi12, a1, a2, m1, m2, fref,phiref):
4061 Wrapper function for SimInspiralTransformPrecessingNewInitialConditions().
4062 Vectorizes function
for use
in append_mapping() methods of the posterior
class.
4065 import lalsimulation
as lalsim
4067 print(
'bayespputils.py: Cannot import lalsimulation SWIG bindings to calculate physical to radiation')
4068 print(
'frame angles, did you remember to use --enable-swig-python when ./configuring lalsimulation?')
4070 from numpy
import shape
4071 transformFunc = lalsim.SimInspiralTransformPrecessingNewInitialConditions
4074 m1_SI = m1*lal.MSUN_SI
4075 m2_SI = m2*lal.MSUN_SI
4078 ins = [theta_jn, phi_jl, tilt1, tilt2, phi12, a1, a2, m1_SI, m2_SI, fref,phiref]
4079 if len(
shape(ins))>1:
4082 for p,param
in enumerate(ins):
4083 ins[p] = param.flatten()
4088 results = np.array([transformFunc(t_jn, p_jl, t1, t2, p12, a1, a2, m1_SI, m2_SI, f,phir)
for (t_jn, p_jl, t1, t2, p12, a1, a2, m1_SI, m2_SI, f,phir)
in zip(*ins)])
4089 iota = results[:,0].reshape(-1,1)
4090 spin1x = results[:,1].reshape(-1,1)
4091 spin1y = results[:,2].reshape(-1,1)
4092 spin1z = results[:,3].reshape(-1,1)
4093 spin2x = results[:,4].reshape(-1,1)
4094 spin2y = results[:,5].reshape(-1,1)
4095 spin2z = results[:,6].reshape(-1,1)
4096 a1,theta1,phi1 =
cart2sph(spin1x,spin1y,spin1z)
4097 a2,theta2,phi2 =
cart2sph(spin2x,spin2y,spin2z)
4099 mc = np.power(m1*m2,3./5.)*np.power(m1+m2,-1./5.)
4101 S1 = np.hstack([m1*m1*spin1x,m1*m1*spin1y,m1*m1*spin1z])
4102 S2 = np.hstack([m2*m2*spin2x,m2*m2*spin2y,m2*m2*spin2z])
4106 return iota, theta1, phi1, theta2, phi2, beta
4111 elif len(
shape(ins))<=1:
4114 for p,param
in enumerate(ins):
4120 results = np.array(transformFunc(theta_jn, phi_jl, tilt1, tilt2, phi12, a1, a2, m1_SI, m2_SI, fref,phiref))
4128 a1,theta1,phi1 =
cart2sph(spin1x,spin1y,spin1z)
4129 a2,theta2,phi2 =
cart2sph(spin2x,spin2y,spin2z)
4131 mc = np.power(m1*m2,3./5.)*np.power(m1+m2,-1./5.)
4133 S1 = m1*m1*np.hstack([spin1x,spin1y,spin1z])
4134 S2 = m2*m2*np.hstack([spin2x,spin2y,spin2z])
4138 return iota, theta1, phi1, theta2, phi2, beta
4148 from scipy
import seterr
as sp_seterr
4150 np.seterr(under=
'ignore')
4151 sp_seterr(under=
'ignore')
4152 pos_samps=onedpos.samples
4154 gkde=onedpos.gaussian_kde
4155 except np.linalg.linalg.LinAlgError:
4156 print(
'Singular matrix in KDE. Skipping')
4158 ind=np.linspace(np.min(pos_samps),np.max(pos_samps),101)
4159 kdepdf=gkde.evaluate(ind)
4160 plt.plot(ind,kdepdf,color=
'green')
4164def plot_one_param_pdf(posterior,plot1DParams,analyticPDF=None,analyticCDF=None,plotkde=False):
4166 Plots a 1D histogram and (gaussian) kernel density estimate of the
4167 distribution of posterior samples
for a given parameter.
4169 @param posterior: an instance of the Posterior
class.
4171 @param plot1DParams: a dict; {paramName:Nbins}
4173 @param analyticPDF: an analytic probability distribution function describing the distribution.
4175 @param analyticCDF: an analytic cumulative distribution function describing the distribution.
4177 @param plotkde: Use KDE to smooth plot (default:
False)
4180 matplotlib.rcParams['text.usetex']=
False
4182 param=list(plot1DParams.keys())[0].lower()
4183 histbins=list(plot1DParams.values())[0]
4185 pos_samps=posterior[param].samples
4186 injpar=posterior[param].injval
4187 trigvals=posterior[param].trigvals
4190 myfig=plt.figure(figsize=(6,4.5),dpi=150)
4192 axes=plt.Axes(myfig,[0.15,0.15,0.6,0.76])
4193 myfig.add_axes(axes)
4194 majorFormatterX=ScalarFormatter(useMathText=
True)
4195 majorFormatterX.format_data=
lambda data:
'%.6g'%(data)
4196 majorFormatterY=ScalarFormatter(useMathText=
True)
4197 majorFormatterY.format_data=
lambda data:
'%.6g'%(data)
4198 majorFormatterX.set_scientific(
True)
4199 majorFormatterY.set_scientific(
True)
4201 if param.find(
'time')!=-1:
4202 offset=floor(min(pos_samps))
4203 pos_samps=pos_samps-offset
4204 if injpar
is not None:
4205 injpar=injpar-offset
4206 ax1_name=param+
' + %i'%(
int(offset))
4207 else: ax1_name=param
4209 (n, bins, patches)=plt.hist(pos_samps,histbins,density=
True,facecolor=
'grey')
4210 Nchars=
max(map(
lambda d:len(majorFormatterX.format_data(d)),axes.get_xticks()))
4219 locatorX=matplotlib.ticker.MaxNLocator(nbins=Nticks)
4220 xmin,xmax=plt.xlim()
4221 if param==
'rightascension' or param==
'ra':
4224 if param==
'declination' or param==
'dec':
4227 axes.xaxis.set_major_formatter(majorFormatterX)
4228 axes.yaxis.set_major_formatter(majorFormatterY)
4230 locatorX.view_limits(bins[0],bins[-1])
4231 axes.xaxis.set_major_locator(locatorX)
4233 histbinSize=bins[1]-bins[0]
4235 (xmin,xmax)=plt.xlim()
4236 x = np.linspace(xmin,xmax,2*len(bins))
4237 plt.plot(x, analyticPDF(x+offset), color=
'r', linewidth=2, linestyle=
'dashed')
4241 from numpy.random
import permutation
4242 samps = permutation(pos_samps)[:max_samps,:].flatten()
4243 D,p = stats.kstest(samps+offset, analyticCDF, mode=
'asymp')
4244 plt.title(
"%s: ks p-value %.3f"%(param,p))
4248 if injpar
is not None:
4250 delta_samps=
max(pos_samps)-min(pos_samps)
4251 minrange=min(pos_samps)-0.05*delta_samps
4252 maxrange=
max(pos_samps)+0.05*delta_samps
4253 if minrange<injpar
and maxrange>injpar:
4255 plt.axvline(injpar, color=
'r', linestyle=
'-.', linewidth=4)
4261 if min(pos_samps)<injpar
and max(pos_samps)>injpar:
4262 bins_to_inj=(injpar-bins[0])/histbinSize
4263 injbinh=
int(floor(bins_to_inj))
4264 injbin_frac=bins_to_inj-float(injbinh)
4267 rbins=(sum(n[0:injbinh-1])+injbin_frac*n[injbinh])*histbinSize
4269 if trigvals
is not None:
4270 for IFO
in [IFO
for IFO
in trigvals.keys()]:
4271 trigval = trigvals[IFO]
4272 if min(pos_samps)<trigval
and max(pos_samps)>trigval:
4273 if IFO==
'H1': color =
'r'
4274 elif IFO==
'L1': color =
'g'
4275 elif IFO==
'V1': color =
'm'
4277 plt.axvline(trigval, color=color, linestyle=
'-.')
4281 plt.ylabel(
'Probability Density')
4284 if(param.lower()==
'ra' or param.lower()==
'rightascension'):
4285 xmin,xmax=plt.xlim()
4299class RALocator(matplotlib.ticker.MultipleLocator):
4301 RA tick locations with some intelligence
4304 hour=pi_constant/12.0
4305 if(max-min)>12.0*hour:
4307 elif(max-min)>6.0*hour:
4310 elif (max-min)>3.0*pi_constant/12.0:
4312 elif (max-min)>hour:
4317 matplotlib.ticker.MultipleLocator.__init__(self,base=base)
4319class DecLocator(matplotlib.ticker.MultipleLocator):
4321 Dec tick locations with some intelligence
4323 def __init__(self, min=-pi_constant/2.0,max=pi_constant/2.0):
4324 deg=pi_constant/180.0
4325 if (max-min)>60*deg:
4327 elif (max-min)>20*deg:
4329 elif (max-min)>10*deg:
4333 matplotlib.ticker.MultipleLocator.__init__(self,base=base)
4337 matplotlib.ticker.FuncFormatter.__init__(self,getRAString)
4341 matplotlib.ticker.FuncFormatter.__init__(self,getDecString)
4346 secs=radians*12.0*3600/pi_constant
4347 hours, rem = divmod(secs, 3600 )
4348 mins,rem = divmod(rem, 60 )
4356 if accuracy==
'hour':
return r'%ih'%(hours)
4357 if accuracy==
'min':
return r'%ih%im'%(hours,mins)
4358 if accuracy==
'sec':
return r'%ih%im%2.0fs'%(hours,mins,secs)
4360 if abs(fmod(secs,60.0))>=0.5: return(
getRAString(radians,accuracy=
'sec'))
4361 if abs(fmod(mins,60.0))>=0.5: return(
getRAString(radians,accuracy=
'min'))
4362 else: return(
getRAString(radians,accuracy=
'hour'))
4366 if matplotlib.rcParams[
'text.usetex']:
4378 deg,rem=divmod(radians,pi_constant/180.0)
4379 mins, rem = divmod(rem, pi_constant/(180.0*60.0))
4380 secs = rem * (180.0*3600.0)/pi_constant
4387 if (accuracy==
'arcmin' or accuracy==
'deg')
and secs>30: mins=mins+1
4388 if accuracy==
'deg' and mins>30: deg=deg+1
4389 if accuracy==
'deg':
return '%i'%(sign*deg)+degsymb
4390 if accuracy==
'arcmin':
return '%i%s%i%s'%(sign*deg,degsymb,mins,minsymb)
4391 if accuracy==
'arcsec':
return '%i%s%i%s%2.0f%s'%(sign*deg,degsymb,mins,minsymb,secs,secsymb)
4400 Make a corner plot using the triangle module
4401 (See http://github.com/dfm/corner.py)
4402 @param posterior: The Posterior object
4403 @param levels: a list of confidence levels
4404 @param parnames: list of parameters to include
4410 import triangle
as corner
4412 print(
'Cannot load corner module. Try running\n\t$ pip install corner')
4414 parnames=list(filter(
lambda x: x
in posterior.names, parnames))
4415 labels = [
plot_label(parname)
for parname
in parnames]
4416 data = np.hstack([posterior[p].samples
for p
in parnames])
4417 if posterior.injection:
4418 injvals=[posterior[p].injval
for p
in parnames]
4419 myfig=corner.corner(data,labels=labels,truths=injvals,quantiles=levels,plot_datapoints=
False,bins=20)
4421 myfig=corner.corner(data,labels=labels,quantiles=levels,plot_datapoints=
False,bins=20)
4425def plot_two_param_kde_greedy_levels(posteriors_by_name,plot2DkdeParams,levels,colors_by_name,line_styles=__default_line_styles,figsize=(4,3),dpi=250,figposition=[0.2,0.2,0.48,0.75],legend=
'right',hatches_by_name=
None,Npixels=50):
4427 Plots a 2D kernel density estimate of the 2-parameter marginal posterior.
4429 @param posteriors_by_name: dictionary of Posterior objects, indexed by name
4431 @param plot2DkdeParams: a dict {param1Name:Nparam1Bins,param2Name:Nparam2Bins}
4433 @param levels: list of credible levels
4435 @param colors_by_name: dict of colors, indexed by name
4437 @param line_styles: list of line styles
for the credible levels
4439 @param figsize: figsize to
pass to matplotlib
4441 @param dpi: dpi to
pass to matplotlib
4443 @param figposition: figposition to
pass to matplotlib
4445 @param legend: position of legend
4447 @param hatches_by_name: dict of hatch styles indexed by name
4449 @param Npixels: Number of pixels
in each axis of the 2D grid
4452 from scipy
import seterr
as sp_seterr
4453 confidence_levels=levels
4457 par2_name,par1_name=plot2DkdeParams.keys()
4458 xbin=plot2DkdeParams[par1_name]
4459 ybin=plot2DkdeParams[par2_name]
4461 np.seterr(under=
'ignore')
4462 sp_seterr(under=
'ignore')
4464 fig=plt.figure(1,figsize=figsize,dpi=dpi)
4466 axes=fig.add_axes(figposition)
4470 if len(line_styles)<len(levels):
4471 raise RuntimeError(
"Error: Need as many or more line styles to choose from as confidence levels to plot!")
4474 for name,posterior
in posteriors_by_name.items():
4475 print(
'Plotting '+name)
4476 name_list.append(name)
4477 par1_injvalue=posterior[par1_name].injval
4478 par2_injvalue=posterior[par2_name].injval
4480 par_trigvalues1=posterior[par1_name].trigvals
4481 par_trigvalues2=posterior[par2_name].trigvals
4482 xdat=posterior[par1_name].samples
4483 ydat=posterior[par2_name].samples
4484 a=np.squeeze(posterior[par1_name].samples)
4485 b=np.squeeze(posterior[par2_name].samples)
4487 if par1_name.find(
'time')!=-1:
4488 offset=floor(min(a))
4491 par1_injvalue=par1_injvalue-offset
4492 ax1_name=par1_name+
' + %i'%(
int(offset))
4493 else: ax1_name=par1_name
4495 if par2_name.find(
'time')!=-1:
4496 offset=floor(min(b))
4499 par2_injvalue=par2_injvalue-offset
4500 ax2_name=par2_name+
' + %i'%(
int(offset))
4501 else: ax2_name=par2_name
4503 samp=np.transpose(np.column_stack((xdat,ydat)))
4506 kde=stats.kde.gaussian_kde(samp)
4517 xsorted=np.sort(xdat,axis=
None)
4518 ysorted=np.sort(ydat,axis=
None)
4519 imin=
int(0.01*len(xsorted))
4520 imax=
int(0.99*len(xsorted))
4525 xmid=0.5*(xmin+xmax)
4526 ymid=0.5*(ymin+ymax)
4527 xmin=xmid+(xmin-xmid)/0.95
4528 xmax=xmid+(xmax-xmid)/0.95
4529 ymin=ymid+(ymin-ymid)/0.95
4530 ymax=ymid+(ymax-ymid)/0.95
4531 xax=np.linspace(xmin,xmax,Nx)
4532 yax=np.linspace(ymin,ymax,Ny)
4540 x,y = np.meshgrid(xax,yax)
4541 grid_coords = np.vstack( (x.flatten(),y.flatten()) )
4542 z = kde(grid_coords)
4543 z = z.reshape(Nx,Ny)
4544 densort=np.sort(den)[::-1]
4548 for level
in levels:
4549 ilevel =
int(Npts*level + 0.5)
4552 zvalues.append(densort[ilevel])
4553 CS=plt.contour(x, y, z, zvalues,colors=[colors_by_name[name]],linestyles=line_styles )
4556 if par1_injvalue
is not None and par2_injvalue
is not None:
4557 plt.plot([par1_injvalue],[par2_injvalue],
'b*',scalex=
False,scaley=
False,markersize=12)
4559 if par_trigvalues1
is not None and par_trigvalues2
is not None:
4560 par1IFOs = set([IFO
for IFO
in par_trigvalues1.keys()])
4561 par2IFOs = set([IFO
for IFO
in par_trigvalues2.keys()])
4562 IFOs = par1IFOs.intersection(par2IFOs)
4564 if IFO==
'H1': color =
'r'
4565 elif IFO==
'L1': color =
'g'
4566 elif IFO==
'V1': color =
'm'
4568 plt.plot([par_trigvalues1[IFO]],[par_trigvalues2[IFO]],color=color,marker=
'o',scalex=
False,scaley=
False)
4574 if len(name_list)!=len(CSlst):
4575 raise RuntimeError(
"Error number of contour objects does not equal number of names! Use only *one* contour from each set to associate a name.")
4579 for plot_name
in name_list:
4580 full_name_list.append(plot_name)
4581 if len(confidence_levels)>1:
4582 for ls_,cl
in zip(line_styles[0:len(confidence_levels)],confidence_levels):
4583 dummy_lines.append(mpl_lines.Line2D(np.array([0.,1.]),np.array([0.,1.]),ls=ls_,color=
'k'))
4584 full_name_list.append(
'%s%%'%
str(
int(cl*100)))
4586 fig_actor_lst = [cs.collections[0]
for cs
in CSlst]
4587 fig_actor_lst.extend(dummy_lines)
4588 if legend
is not None:
4590 twodcontour_legend=plt.figlegend(tuple(fig_actor_lst), tuple(full_name_list), loc=
'right',framealpha=0.1)
4592 twodcontour_legend=plt.figlegend(tuple(fig_actor_lst), tuple(full_name_list), loc=
'right')
4593 for text
in twodcontour_legend.get_texts():
4594 text.set_fontsize(
'small')
4596 majorFormatterX=ScalarFormatter(useMathText=
True)
4597 majorFormatterX.format_data=
lambda data:
'%.4g'%(data)
4598 majorFormatterY=ScalarFormatter(useMathText=
True)
4599 majorFormatterY.format_data=
lambda data:
'%.4g'%(data)
4600 majorFormatterX.set_scientific(
True)
4601 majorFormatterY.set_scientific(
True)
4602 axes.xaxis.set_major_formatter(majorFormatterX)
4603 axes.yaxis.set_major_formatter(majorFormatterY)
4604 Nchars=
max(map(
lambda d:len(majorFormatterX.format_data(d)),axes.get_xticks()))
4613 locatorX=matplotlib.ticker.MaxNLocator(nbins=Nticks-1)
4614 if par1_name==
'rightascension' or par1_name==
'ra':
4615 (ramin,ramax)=plt.xlim()
4618 if par1_name==
'declination' or par1_name==
'dec':
4619 (decmin,decmax)=plt.xlim()
4622 axes.xaxis.set_major_formatter(majorFormatterX)
4623 if par2_name==
'rightascension' or par2_name==
'ra':
4624 (ramin,ramax)=plt.ylim()
4626 axes.yaxis.set_major_locator(locatorY)
4628 if par2_name==
'declination' or par2_name==
'dec':
4629 (decmin,decmax)=plt.ylim()
4632 axes.yaxis.set_major_locator(locatorY)
4634 axes.yaxis.set_major_formatter(majorFormatterY)
4636 axes.xaxis.set_major_locator(locatorX)
4639 if(par1_name.lower()==
'ra' or par1_name.lower()==
'rightascension'):
4640 xmin,xmax=plt.xlim()
4641 if(xmin<0.0): xmin=0.0
4642 if(xmax>2.0*pi_constant): xmax=2.0*pi_constant
4649 Plots a 2D kernel density estimate of the 2-parameter marginal posterior.
4651 @param posterior: an instance of the Posterior
class.
4653 @param plot2DkdeParams: a dict {param1Name:Nparam1Bins,param2Name:Nparam2Bins}
4656 from scipy
import seterr
as sp_seterr
4658 par1_name,par2_name=plot2DkdeParams.keys()
4659 Nx=plot2DkdeParams[par1_name]
4660 Ny=plot2DkdeParams[par2_name]
4662 xdat=posterior[par1_name].samples
4663 ydat=posterior[par2_name].samples
4665 par_injvalue1=posterior[par1_name].injval
4666 par_injvalue2=posterior[par2_name].injval
4668 par_trigvalues1=posterior[par1_name].trigvals
4669 par_trigvalues2=posterior[par2_name].trigvals
4671 np.seterr(under=
'ignore')
4672 sp_seterr(under=
'ignore')
4674 myfig=plt.figure(1,figsize=(6,4),dpi=200)
4675 myfig.add_axes(plt.Axes(myfig,[0.20,0.20,0.75,0.7]))
4678 xax=np.linspace(min(xdat),
max(xdat),Nx)
4679 yax=np.linspace(min(ydat),
max(ydat),Ny)
4680 x,y=np.meshgrid(xax,yax)
4682 samp=np.transpose(np.column_stack((xdat,ydat)))
4684 kde=stats.kde.gaussian_kde(samp)
4686 grid_coords = np.append(x.reshape(-1,1),y.reshape(-1,1),axis=1)
4688 z = kde(grid_coords.T)
4689 z = z.reshape(Nx,Ny)
4692 asp=xax.ptp()/yax.ptp()
4694 plt.imshow(z,extent=(xax[0],xax[-1],yax[0],yax[-1]),aspect=asp,origin=
'lower')
4697 if par_injvalue1
is not None and par_injvalue2
is not None:
4698 plt.plot([par_injvalue1],[par_injvalue2],
'bo',scalex=
False,scaley=
False)
4700 if par_trigvalues1
is not None and par_trigvalues2
is not None:
4701 par1IFOs = set([IFO
for IFO
in par_trigvalues1.keys()])
4702 par2IFOs = set([IFO
for IFO
in par_trigvalues2.keys()])
4703 IFOs = par1IFOs.intersection(par2IFOs)
4705 if IFO==
'H1': color =
'r'
4706 elif IFO==
'L1': color =
'g'
4707 elif IFO==
'V1': color =
'm'
4709 plt.plot([par_trigvalues1[IFO]],[par_trigvalues2[IFO]],color=color,marker=
'o',scalex=
False,scaley=
False)
4716 if(par1_name.lower()==
'ra' or par1_name.lower()==
'rightascension'):
4717 xmin,xmax=plt.xlim()
4725 Filter injections to find the injection with end time given by time +/- 0.1s
4728 injection = itertools.ifilter(
lambda a: abs(float(
get_end(a)) - time) < 0.1, injections).next()
4731def histogram2D(posterior,greedy2Params,confidence_levels):
4733 Returns a 2D histogram and edges
for the two parameters passed
in greedy2Params, plus the actual discrete confidence levels
4734 imposed by the finite number of samples.
4735 H,xedges,yedges,Hlasts =
histogram2D(posterior,greedy2Params,confidence_levels)
4736 @param posterior: Posterior instance
4737 @param greedy2Params: a dict ;{param1Name:param1binSize,param2Name:param2binSize}
4738 @param confidence_levels: a list of the required confidence levels to plot on the contour map.
4741 par1_name,par2_name=greedy2Params.keys()
4742 par1_bin=greedy2Params[par1_name]
4743 par2_bin=greedy2Params[par2_name]
4744 par1_injvalue=posterior[par1_name.lower()].injval
4745 par2_injvalue=posterior[par2_name.lower()].injval
4746 a=np.squeeze(posterior[par1_name].samples)
4747 b=np.squeeze(posterior[par2_name].samples)
4752 par1pos_Nbins= int(ceil((par1pos_max - par1pos_min)/par1_bin))+1
4753 par2pos_Nbins= int(ceil((par2pos_max - par2pos_min)/par2_bin))+1
4754 H, xedges, yedges = np.histogram2d(a,b, bins=(par1pos_Nbins, par2pos_Nbins),density=True)
4757 confidence_levels.sort()
4760 idxes=np.argsort(temp)
4762 for cl
in confidence_levels:
4763 while float(Hsum/np.sum(H))<cl:
4771 Hlasts.append(Hlast)
4772 return (H,xedges,yedges,Hlasts)
4774def plot_two_param_greedy_bins_contourf(posteriors_by_name,greedy2Params,confidence_levels,colors_by_name,figsize=(7,6),dpi=120,figposition=[0.3,0.3,0.5,0.5],legend=
'right',hatches_by_name=
None):
4776 @param posteriors_by_name A dictionary of posterior objects indexed by name
4777 @param greedy2Params: a dict ;{param1Name:param1binSize,param2Name:param2binSize}
4778 @param confidence_levels: a list of the required confidence levels to plot on the contour map.
4779 @param colors_by_name: dict of colors, indexed by name
4780 @param figsize: figsize to
pass to matplotlib
4781 @param dpi: dpi to
pass to matplotlib
4782 @param figposition: figposition to
pass to matplotlib
4783 @param legend: Legend position to
pass to matplotlib
4784 @param hatches_by_name: dict of hatch styles, indexed by name
4786 fig=plt.figure(1,figsize=figsize,dpi=dpi)
4788 fig.add_axes(figposition)
4791 par1_name,par2_name=greedy2Params.keys()
4792 for name,posterior
in posteriors_by_name.items():
4793 name_list.append(name)
4794 H,xedges,yedges,Hlasts=
histogram2D(posterior,greedy2Params,confidence_levels+[0.99999999999999])
4795 extent= [xedges[0], yedges[-1], xedges[-1], xedges[0]]
4796 CS2=plt.contourf(yedges[:-1],xedges[:-1],H,Hlasts,extend=
'max',colors=[colors_by_name[name]] ,alpha=0.3 )
4797 CS=plt.contour(yedges[:-1],xedges[:-1],H,Hlasts,extend=
'max',colors=[colors_by_name[name]] )
4800 plt.title(
"%s-%s confidence contours (greedy binning)"%(par1_name,par2_name))
4803 if len(name_list)!=len(CSlst):
4804 raise RuntimeError(
"Error number of contour objects does not equal number of names! Use only *one* contour from each set to associate a name.")
4807 for plot_name
in name_list:
4808 full_name_list.append(plot_name)
4809 if len(confidence_levels)>1:
4810 for cl
in confidence_levels+[1]:
4811 dummy_lines.append(mpl_lines.Line2D(np.array([0.,1.]),np.array([0.,1.]),color=
'k'))
4812 full_name_list.append(
'%s%%'%
str(
int(cl*100)))
4813 fig_actor_lst = [cs.collections[0]
for cs
in CSlst]
4814 fig_actor_lst.extend(dummy_lines)
4815 if legend
is not None:
4817 twodcontour_legend=plt.figlegend(tuple(fig_actor_lst), tuple(full_name_list), loc=
'right',framealpha=0.1)
4819 twodcontour_legend=plt.figlegend(tuple(fig_actor_lst), tuple(full_name_list), loc=
'right')
4820 for text
in twodcontour_legend.get_texts():
4821 text.set_fontsize(
'small')
 4829 Histograms of the ranked pixels produced by the 2-parameter greedy
 4830 binning algorithm coloured by their confidence level.
4832 @param posterior: an instance of the Posterior
class.
4834 @param greedy2Params: a dict ;{param1Name:param1binSize,param2Name:param2binSize}
4836 @param confidence_levels: list of confidence levels to show
4839 from scipy
import seterr
as sp_seterr
4841 np.seterr(under=
'ignore')
4842 sp_seterr(under=
'ignore')
4845 par1_name,par2_name=greedy2Params.keys()
4847 par1_bin=greedy2Params[par1_name]
4848 par2_bin=greedy2Params[par2_name]
4850 a=np.squeeze(posterior[par1_name].samples)
4851 b=np.squeeze(posterior[par2_name].samples)
4854 par1_injvalue=posterior[par1_name.lower()].injval
4855 par2_injvalue=posterior[par2_name.lower()].injval
4864 par1pos_Nbins=
int(ceil((par1pos_max - par1pos_min)/par1_bin))+1
4865 par2pos_Nbins=
int(ceil((par2pos_max - par2pos_min)/par2_bin))+1
4868 if par1_name.find(
'time')!=-1:
4869 offset=floor(min(a))
4872 par1_injvalue=par1_injvalue-offset
4873 ax1_name=par1_name+
' + %i'%(
int(offset))
4874 else: ax1_name=par1_name
4876 if par2_name.find(
'time')!=-1:
4877 offset=floor(min(b))
4880 par2_injvalue=par2_injvalue-offset
4881 ax2_name=par2_name+
' + %i'%(
int(offset))
4882 else: ax2_name=par2_name
4886 par1_trigvalues=posterior[par1_name.lower()].trigvals
4887 par2_trigvalues=posterior[par2_name.lower()].trigvals
4890 axes=plt.Axes(myfig,[0.3,0.3,0.95-0.3,0.90-0.3])
4891 myfig.add_axes(axes)
4900 majorFormatterX=ScalarFormatter(useMathText=
True)
4901 majorFormatterX.format_data=
lambda data:
'%.4g'%(data)
4902 majorFormatterY=ScalarFormatter(useMathText=
True)
4903 majorFormatterY.format_data=
lambda data:
'%.4g'%(data)
4904 majorFormatterX.set_scientific(
True)
4905 majorFormatterY.set_scientific(
True)
4906 axes.xaxis.set_major_formatter(majorFormatterX)
4907 axes.yaxis.set_major_formatter(majorFormatterY)
4908 H, xedges, yedges = np.histogram2d(a,b, bins,density=
False)
4916 Hsum_actual=np.sum(H)
4918 idxes=np.argsort(temp)
4920 while Hsum<Hsum_actual:
4929 H.flat[max_i]=1-float(Hsum)/float(Hsum_actual)
4931 extent = [yedges[0], yedges[-1], xedges[0], xedges[-1]]
4932 plt.imshow(np.flipud(H), axes=axes, aspect=
'auto', extent=extent, interpolation=
'nearest',cmap=
'gray_r')
4933 plt.gca().autoscale_view()
4938 Nchars=
max(map(
lambda d:len(majorFormatterX.format_data(d)),axes.get_xticks()))
4947 locatorX=matplotlib.ticker.MaxNLocator(nbins=Nticks-1)
4948 (xmin,xmax)=plt.xlim()
4949 (ymin,ymax)=plt.ylim()
4950 if par2_name==
'rightascension' or par2_name==
'ra':
4953 if par2_name==
'declination' or par2_name==
'dec':
4956 if par1_name==
'rightascension' or par1_name==
'ra':
4958 axes.yaxis.set_major_locator(locatorY)
4960 if par1_name==
'declination' or par1_name==
'dec':
4962 axes.yaxis.set_major_locator(locatorY)
4965 axes.xaxis.set_major_formatter(majorFormatterX)
4966 axes.yaxis.set_major_formatter(majorFormatterY)
4968 axes.xaxis.set_major_locator(locatorX)
4970 if par1_injvalue
is not None and par2_injvalue
is not None:
4971 plt.plot([par2_injvalue],[par1_injvalue],
'bo',scalex=
False,scaley=
False)
4975 if(par2_name.lower()==
'ra' or par2_name.lower()==
'rightascension'):
4976 xmin,xmax=plt.xlim()
4977 if(xmin)<0.0: xmin=0.0
4978 if(xmax>2.0*pi_constant): xmax=2.0*pi_constant
4986 Determine the 1-parameter Bayesian Confidence Interval using a greedy
4989 @param posterior: an instance of the posterior
class.
4991 @param greedy1Param: a dict; {paramName:paramBinSize}.
4993 @param confidence_levels: A list of floats of the required confidence intervals [(0-1)].
4996 paramName=list(greedy1Param.keys())[0]
4997 par_bin=list(greedy1Param.values())[0]
4998 par_samps=posterior[paramName.lower()].samples
5000 parpos_min=min(par_samps)[0]
5001 parpos_max=max(par_samps)[0]
5003 par_point=parpos_min
5005 parpos_Nbins= int(ceil((parpos_max - parpos_min)/par_bin))+1
5007 greedyPoints=np.zeros((parpos_Nbins,2))
5009 greedyHist=np.zeros(parpos_Nbins,dtype=
'i8')
5012 for i
in range(parpos_Nbins):
5013 greedyPoints[i,0]=par_point
5014 greedyPoints[i,1]=par_point
5017 for par_samp
in par_samps:
5018 par_samp=par_samp[0]
5019 par_binNumber=
int(floor((par_samp-parpos_min)/par_bin))
5021 greedyHist[par_binNumber]+=1
5023 print(
"IndexError: bin number: %i total bins: %i parsamp: %f "\
5024 %(par_binNumber,parpos_Nbins,par_samp))
5028 par_injvalue=posterior[paramName].injval
5030 par_binNumber=floor((par_injvalue-parpos_min)/par_bin)
5031 injbin=par_binNumber
5033 toppoints,injectionconfidence,reses,injection_area=_greedy_bin(greedyHist,greedyPoints,injbin,float(par_bin),
int(len(par_samps)),confidence_levels)
5035 confidence_levels.sort()
5036 for cl
in confidence_levels:
5037 ind=np.nonzero(toppoints[:,-1]<cl)
5040 cl_intervals.append((np.min(toppoints[ind,0]),np.max(toppoints[ind,0])))
5044 cl_intervals.append((toppoints[ind[0],0],toppoints[ind[0],0]))
5046 return toppoints,injectionconfidence,reses,injection_area,cl_intervals
5051 Calculates the smallest contigious 1-parameter confidence interval for a
5052 set of given confidence levels.
5054 @param posterior: an instance of the Posterior
class.
5056 @param contInt1Params: a dict {paramName:paramBinSize}.
5058 @param confidence_levels: Required confidence intervals.
5064 paramName=list(contInt1Params.keys())[0]
5065 par_bin=list(contInt1Params.values())[0]
5067 par_injvalue=posterior[paramName].injval
5069 par_samps=posterior[paramName].samples
5071 parpos_min=min(par_samps)
5072 parpos_max=max(par_samps)
5074 par_point=parpos_min
5075 parpos_Nbins= int(ceil((parpos_max - parpos_min)/par_bin))+1
5077 greedyHist=np.zeros(parpos_Nbins,dtype='i8')
5079 for par_samp
in par_samps:
5080 par_binNumber=
int(floor((par_samp-parpos_min)/par_bin))
5082 greedyHist[par_binNumber]+=1
5084 print(
"IndexError: bin number: %i total bins: %i parsamp: %f bin: %i"\
5095 par_binNumber=floor((par_injvalue-parpos_min)/par_bin)
5096 injbin=par_binNumber
5100 len_par_samps=len(par_samps)
5105 while j < len(confidence_levels):
5106 confidence_level=confidence_levels[j]
5111 for i
in range(len(greedyHist)):
5118 while right<len(greedyHist):
5119 Npoints=sum(greedyHist[left:right])
5120 frac=float(Npoints)/float(len_par_samps)
5123 if max_frac
is None:
5136 if injbin
is not None and injinterval
is None:
5137 if injbin
in range(max_left,max_right):
5138 injinterval=(max_right-max_left)*par_bin
5139 oneDContInj[
'interval']=injinterval
5140 oneDContInj[
'confidence']=1-frac
5141 if max_frac > confidence_level:
5146 if max_frac
is None:
5147 print(
"Cant determine intervals at %f confidence!"%confidence_level)
5150 oneDContCL[
'left']=max_left*par_bin
5151 oneDContCL[
'right']=max_right*par_bin
5152 oneDContCL[
'width']=(max_right-max_left)*par_bin
5154 while k+1<len(confidence_levels) :
5155 if confidence_levels[k+1]<max_frac:
5160 return oneDContCL,oneDContInj
class ACLError(Exception):
    """Raised when a self-consistent autocorrelation length (ACL)
    estimate cannot be obtained from a chain of samples, typically
    because the chain is too short relative to its true ACL.

    NOTE(review): the ``class`` statement and docstring were lost in
    extraction; restored here with the standard ``Exception`` base.
    """

    def __init__(self, *args):
        # Forward all arguments to the base Exception so that str(err)
        # and err.args behave like any standard exception.
        super(ACLError, self).__init__(*args)
def autocorrelation(series):
    """Return an estimate of the autocorrelation function (ACF) of a
    given series.

    Only the positive-lag portion of the ACF is returned, normalized so
    that the zero-lag element is 1.

    @param series: 1-D numpy array of samples.

    NOTE(review): the ``def`` line and the trailing statements were lost
    in extraction; reconstructed around the surviving fragments.
    """
    # Mean-subtract so the ACF measures fluctuations about the mean.
    x = series - np.mean(series)
    y = np.conj(x[::-1])

    # Convolving x with its reversed conjugate yields the (auto)
    # correlation; ifftshift rotates the zero-lag term to index 0.
    acf = np.fft.ifftshift(signal.fftconvolve(y, x, mode='full'))

    # Keep only the non-negative lags 0 .. N-1.
    N = series.shape[0]
    acf = acf[0:N]

    # Normalize so that acf[0] == 1.
    return acf / acf[0]
def autocorrelation_length_estimate(series, acf=None, M=5, K=2):
    """Attempt to find a self-consistent estimate of the
    autocorrelation length (ACL) of a given series.

    If C(tau) is the autocorrelation function (normalized so C(0) = 1,
    for example from the autocorrelation procedure in this module),
    then the ACL is the smallest s such that

        1 + 2*C(1) + 2*C(2) + ... + 2*C(M*s) < s

    In words: the ACL is the shortest length so that the sum of the
    autocorrelation function is smaller than that length over a window
    of M times that length.

    The maximum window length is restricted to len(series)/K as a
    safety precaution against relying on data near the extreme of the
    lags in the ACF, where there is a lot of noise; this implies the
    series must be at least M*K*s samples long to obtain a reliable
    estimate of the ACL.

    @param series: samples whose ACL is wanted.
    @param acf: optional precomputed ACF of ``series``; computed from
        ``series`` when None.
    @param M: window multiplier in the self-consistency condition.
    @param K: safety factor limiting the maximum usable lag.

    Raises ACLError if no self-consistent s can be found; in this case
    the series is likely too short relative to its true ACL.
    """
    if acf is None:
        acf = autocorrelation(series)

    # Weight the positive lags by 2 (the ACF is symmetric).  Build a
    # fresh array so a caller-supplied ``acf`` is not mutated in place
    # (the original doubled acf[1:] as a side effect).
    acf = np.concatenate(([acf[0]], 2.0 * acf[1:]))

    # Largest usable window index.
    imax = int(acf.shape[0] / K)

    # Cumulative windowed ACF sum and the candidate ACL per window.
    cacf = np.cumsum(acf)
    s = np.arange(1, cacf.shape[0] + 1) / float(M)

    # Windows whose summed ACF is below the ACL associated with them.
    estimates = np.flatnonzero(cacf[:imax] < s[:imax])

    if estimates.shape[0] > 0:
        # Return the first (smallest) self-consistent ACL.
        return s[estimates[0]]

    # Cannot find a self-consistent ACL estimate.
    raise ACLError('autocorrelation length too short for consistent estimate')
def effectiveSampleSize(samples, Nskip=1):
    """Compute the effective sample size of a chain of samples,
    calculating the ACL using only the second half of the samples to
    avoid ACL overestimation due to chains equilibrating after
    adaptation.

    @param samples: 1-D array-like chain of samples.
    @param Nskip: thinning factor already applied to the chain; the
        returned ACL is rescaled by it.

    Returns the tuple (Neffective, acl, acf).

    NOTE(review): most of this body was lost in extraction and is
    reconstructed from the surviving docstring and return statement;
    confirm against upstream before relying on details.
    """
    samples = np.array(samples)
    N = len(samples)
    # Estimate the ACF/ACL from the second half of the chain only.
    acf = autocorrelation(samples[N // 2:])
    try:
        acl = autocorrelation_length_estimate(samples[N // 2:], acf=acf)
    except ACLError:
        # No self-consistent estimate: conservatively treat the whole
        # chain as one correlated sample.
        acl = N
    Neffective = floor(N / acl)
    acl *= Nskip
    return (Neffective, acl, acf)
5251 from igwn_ligolw
import lsctables
5252 from igwn_ligolw
import utils
5253 coincXML = utils.load_filename(xml_file)
5254 coinc = lsctables.CoincTable.get_table(coincXML)
5255 coincMap = lsctables.CoincMapTable.get_table(coincXML)
5256 snglInsps = lsctables.SnglInspiralTable.get_table(coincXML)
5258 if (trignum>len(coinc)):
5259 raise RuntimeError(
"Error: You asked for trigger %d, but %s contains only %d triggers" %(trignum,xml_file,len(coinc)))
5261 coincEventID = coinc.getColumnByName(
'coinc_event_id')[trignum]
5262 eventIDs = [row.event_id
for row
in coincMap
if row.coinc_event_id == coincEventID]
5263 triggers = [row
for row
in snglInsps
if row.event_id
in eventIDs]
5272 Given a list of files, threshold value, and a desired
5273 number of outputs posterior samples,
return the skip number to
5274 achieve the desired number of posterior samples.
5276 if nDownsample
is None:
5277 print(
"Max ACL(s):")
5278 splineParams=[
"spcal_npts",
"spcal_active",
"constantcal_active"]
5279 for i
in np.arange(25):
5280 for k
in lal.cached_detector_by_prefix:
5281 splineParams.append(k.lower()+
'_spcal_freq_'+
str(i))
5282 splineParams.append(k.lower()+
'_spcal_logfreq_'+
str(i))
5284 nonParams = [
"logpost",
"post",
"cycle",
"timestamp",
"snrh1",
"snrl1",
"snrv1",
5285 "margtime",
"margtimephi",
"margtime",
"time_max",
"time_min",
5286 "time_mean",
"time_maxl",
"sky_frame",
"psdscaleflag",
"logdeltaf",
"flow",
"f_ref",
5287 "lal_amporder",
"lal_pnorder",
"lal_approximant",
"tideo",
"spino",
"signalmodelflag",
5288 "temperature",
"nifo",
"nlocaltemps",
"ntemps",
"randomseed",
"samplerate",
"segmentlength",
"segmentstart",
5289 "t0",
"phase_maxl",
"azimuth",
"cosalpha",
"lal_amporder",
"bluni"] + logParams + snrParams + splineParams
5290 fixedParams = [p
for p
in samples.colnames
if all(x==samples[p][0]
for x
in samples[p])]
5291 print(
"Fixed parameters: "+
str(fixedParams))
5292 nonParams.extend(fixedParams)
5293 params = [p
for p
in samples.colnames
if p.lower()
not in nonParams]
5294 stride=np.diff(samples[
'cycle'])[0]
5295 results = np.array([np.array(
effectiveSampleSize(samples[param])[:2])
for param
in params])
5296 nEffs = results[:,0]
5297 nEffective = min(nEffs)
5299 maxACLind = np.argmax(ACLs)
5300 maxACL = ACLs[maxACLind]
5302 print(
"%i (%s)." %(stride*maxACL,params[maxACLind]))
5305 if nDownsample
is not None:
5306 if len(samples) > nDownsample:
5307 nskip *= floor(len(samples)/nDownsample)
5312 if len(samples) > nEff:
5313 nskip =
int(ceil(len(samples)/nEff))
5320 A parser for the output of Bayesian parameter estimation codes.
5322 TODO: Will be abstract
class when LDG moves over to Python >2.6,
5323 inherited by each method .
5326 if inputtype ==
'ns':
5328 elif inputtype ==
'common':
5330 elif inputtype ==
'fm':
5332 elif inputtype ==
"inf_mcmc":
5334 elif inputtype ==
"xml":
5336 elif inputtype ==
'hdf5':
5338 elif inputtype ==
'hdf5s':
5341 raise ValueError(
'Invalid value for "inputtype": %r' % inputtype)
def parse(self, files, **kwargs):
    """Dispatch ``files`` to the parser selected at construction time.

    @param files: input file(s) understood by the configured parser.
    @param kwargs: forwarded verbatim to the underlying parser.
    """
    parser = self._parser
    return parser(files, **kwargs)
5349 def _infmcmc_to_pos(self,files,outdir=None,deltaLogP=None,fixedBurnins=None,nDownsample=None,oldMassConvention=False,**kwargs):
5351 Parser for lalinference_mcmcmpi output.
5353 if not (fixedBurnins
is None):
5354 if not (deltaLogP
is None):
5355 print(
"Warning: using deltaLogP criteria in addition to fixed burnin")
5356 if len(fixedBurnins) == 1
and len(files) > 1:
5357 print(
"Only one fixedBurnin criteria given for more than one output. Applying this to all outputs.")
5358 fixedBurnins = np.ones(len(files),
'int')*fixedBurnins[0]
5359 elif len(fixedBurnins) != len(files):
5360 raise RuntimeError(
"Inconsistent number of fixed burnin criteria and output files specified.")
5361 print(
"Fixed burning criteria: ",fixedBurnins)
5363 fixedBurnins = np.zeros(len(files))
5364 logPThreshold=-np.inf
5365 if not (deltaLogP
is None):
5366 logPThreshold= - deltaLogP
5367 print(
"Eliminating any samples before log(Post) = ", logPThreshold)
5369 if nDownsample
is None:
5370 print(
"Downsampling to take only uncorrelated posterior samples from each file.")
5371 if len(nskips) == 1
and np.isnan(nskips[0]):
5372 print(
"WARNING: All samples in chain are correlated. Downsampling to 10000 samples for inspection!!!")
5375 for i
in range(len(nskips)):
5376 if np.isnan(nskips[i]):
5377 print(
"%s eliminated since all samples are correlated.")
5379 print(
"Downsampling by a factor of ", nskips[0],
" to achieve approximately ", nDownsample,
" posterior samples")
5382 runfileName=os.path.join(outdir,
"lalinfmcmc_headers.dat")
5383 postName=
"posterior_samples.dat"
5384 runfile=open(runfileName,
'w')
5385 outfile=open(postName,
'w')
5394 def _infmcmc_output_posterior_samples(self, files, runfile, outfile, logPThreshold, fixedBurnins, nskips=None, oldMassConvention=False):
5396 Concatenate all the samples from the given files into outfile.
5397 For each file, only those samples past the point where the
5398 log(post) > logPThreshold are concatenated after eliminating
5405 nskips = np.ones(len(files),
'int')
5406 for infilename,i,nskip,fixedBurnin
in zip(files,range(1,len(files)+1),nskips,fixedBurnins):
5407 infile=open(infilename,
'r')
5409 print(
"Writing header of %s to %s"%(infilename,runfile.name))
5411 runfile.write(
'Chain '+
str(i)+
':\n')
5412 runfile.writelines(runInfo)
5413 print(
"Processing file %s to %s"%(infilename,outfile.name))
5415 if 'f_ref' not in header:
5418 if oldMassConvention:
5423 if not outputHeader:
5424 for label
in header:
5425 outfile.write(label)
5428 outfile.write(
"f_ref")
5430 outfile.write(
"chain")
5433 iterindex=header.index(
"cycle")
5434 logpindex=header.index(
"logpost")
5438 lineParams=line.split()
5439 iter=
int(lineParams[iterindex])
5440 logP=float(lineParams[logpindex])
5441 if (iter > fixedBurnin)
and (logP >= logPThreshold):
5444 if nRead % nskip == 0:
5445 for label
in outputHeader:
5451 outfile.write(lineParams[header.index(label)])
5454 outfile.write(f_ref)
5456 outfile.write(
str(i))
5459 if output: acceptedChains += 1
5462 print(
"%i of %i chains accepted."%(acceptedChains,len(files)))
5464 def _swaplabel12(self, label):
5465 if label[-1] ==
'1':
5466 return label[0:-1] +
'2'
5467 elif label[-1] ==
'2':
5468 return label[0:-1] +
'1'
5472 def _find_max_logP(self, files):
5474 Given a list of files, reads them, finding the maximum log(post)
5477 for inpname
in files:
5478 infile=open(inpname,
'r')
5481 logpindex=header.index(
"logpost")
5483 line=line.lstrip().split()
5484 logP=float(line[logpindex])
5489 print(
"Found max log(post) = ", maxLogP)
5492 def _find_ndownsample(self, files, logPthreshold, fixedBurnins, nDownsample):
5494 Given a list of files, threshold value, and a desired
5495 number of outputs posterior samples,
return the skip number to
5496 achieve the desired number of posterior samples.
5501 if nDownsample
is None: print(
"Max ACL(s):")
5502 for inpname,fixedBurnin
in zip(files,fixedBurnins):
5503 infile = open(inpname,
'r')
5506 header = [name.lower()
for name
in header]
5507 logpindex = header.index(
"logpost")
5508 iterindex = header.index(
"cycle")
5509 deltaLburnedIn =
False
5510 fixedBurnedIn =
False
5515 line = line.lstrip().split()
5516 iter =
int(line[iterindex])
5517 logP = float(line[logpindex])
5518 if iter > fixedBurnin:
5519 fixedBurnedIn =
True
5522 fixedBurnedIn =
False
5525 if logP > logPthreshold:
5526 deltaLburnedIn =
True
5529 if fixedBurnedIn
and deltaLburnedIn
and not adapting:
5533 if nDownsample
is None:
5535 splineParams=[
"spcal_npts",
"spcal_active",
"constantcal_active"]
5536 for i
in np.arange(5):
5537 for k
in [
'h1',
'l1']:
5538 splineParams.append(k+
'_spcal_freq'+
str(i))
5539 splineParams.append(k+
'_spcal_logfreq'+
str(i))
5541 nonParams = [
"logpost",
"cycle",
"timestamp",
"snrh1",
"snrl1",
"snrv1",
5542 "margtime",
"margtimephi",
"margtime",
"time_max",
"time_min",
5543 "time_mean",
"time_maxl",
"sky_frame",
"psdscaleflag",
"logdeltaf",
"flow",
"f_ref",
5544 "lal_amporder",
"lal_pnorder",
"lal_approximant",
"tideo",
"spino",
"signalmodelflag",
5545 "temperature",
"nifo",
"nlocaltemps",
"ntemps",
"randomseed",
"samplerate",
"segmentlength",
"segmentstart",
5546 "t0",
"phase_maxl",
"azimuth",
"cosalpha"] + logParams + snrParams + splineParams
5547 nonParamsIdxs = [header.index(name)
for name
in nonParams
if name
in header]
5548 samps = np.array(lines).astype(float)
5549 fixedIdxs = np.where(np.amin(samps,axis=0)-np.amax(samps,axis=0) == 0.0)[0]
5550 nonParamsIdxs.extend(fixedIdxs)
5551 paramIdxs = [i
for i
in range(len(header))
if i
not in nonParamsIdxs]
5552 stride=samps[1,iterindex] - samps[0,iterindex]
5554 nEffs = results[:,0]
5555 nEffectives.append(min(nEffs))
5557 maxACLind = np.argmax(ACLs)
5558 maxACL = ACLs[maxACLind]
5560 maxACLind = paramIdxs[maxACLind]
5561 print(
"%i (%s) for chain %s." %(stride*maxACL,header[maxACLind],inpname))
5563 nEffectives.append(
None)
5564 print(
"Error computing effective sample size of %s!"%inpname)
5568 nskips = np.ones(nfiles)
5570 if nDownsample
is not None:
5571 if ntot > nDownsample:
5572 nskips *=
int(floor(ntot/nDownsample))
5574 for i
in range(nfiles):
5575 nEff = nEffectives[i]
5579 nskips[i] =
int(ceil(ntot/nEff))
5584 def _find_infmcmc_f_ref(self, runInfo):
5586 Searches through header to determine reference frequency of waveforms.
5587 If no fRef given, calls _find_infmcmc_f_lower to get the lower frequency
5588 bound, which is the default reference frequency
for LALInference.
5591 runInfoIter = iter(runInfo)
5592 for line
in runInfoIter:
5593 headers=line.lstrip().lower().split()
5595 fRefColNum = headers.index(
'f_ref')
5596 info = runInfoIter.next().lstrip().lower().split()
5605 runInfoIter = iter(runInfo)
5606 for line
in runInfoIter:
5607 headers=line.lstrip().lower().split()
5609 if headers[0]==
"command":
5611 fRefInd = headers.index(
'--fref')+1
5612 fRef = headers[fRefInd]
5625 def _find_infmcmc_f_lower(self, runInfo):
5627 Searches through header to determine starting frequency of waveforms.
5628 Assumes same for all IFOs.
5630 runInfo = iter(runInfo)
5631 for line
in runInfo:
5632 headers=line.lstrip().lower().split()
5634 flowColNum = headers.index(
'f_low')
5635 IFOinfo = runInfo.next().lstrip().lower().split()
5636 f_lower = IFOinfo[flowColNum]
5642 def _clear_infmcmc_header(self, infile):
5644 Reads lalinference_mcmcmpi file given, returning the run info and
5645 common output header information.
5649 runInfo.append(line)
5650 headers=line.lstrip().lower().split()
5652 headers.index(
'cycle')
5657 raise RuntimeError(
"couldn't find line with 'cycle' in LALInferenceMCMC input")
5658 return runInfo[:-1],headers
5661 def _ns_to_pos(self,files,Nlive=None,Npost=None,posfilename='posterior_samples.dat'):
5663 Parser for nested sampling output.
5664 files : list of input NS files
5665 Nlive : Number of live points
5666 Npost : Desired number of posterior samples
5667 posfilename : Posterior output file name (default:
'posterior_samples.dat')
5672 print(
"Need lalinference.nest2pos to convert nested sampling output!")
5676 raise RuntimeError(
"Need to specify number of live points in positional arguments of parse!")
5684 parsfilename = (it.next()).strip(
'.gz')+
'_params.txt'
5686 if os.path.isfile(parsfilename):
5687 print(
'Looking for '+parsfilename)
5689 if os.access(parsfilename,os.R_OK):
5691 with open(parsfilename,
'r')
as parsfile:
5692 outpars=parsfile.readline()+
'\n'
5694 raise RuntimeError(
'Cannot open parameters file %s!'%(parsfilename))
5697 outpars=
'mchirp \t eta \t time \t phi0 \t dist \t RA \t \
5698 dec \t psi \t iota \t logl \n'
5701 parsvec=outpars.split()
5703 for i
in range(len(parsvec)):
5704 if parsvec[i].lower()==
'logl':
5707 print(
'Error! Could not find logL column in parameter list: %s'%(outpars))
5710 inarrays=list(map(np.loadtxt,files))
5712 pos=
draw_posterior_many(inarrays,[Nlive
for f
in files],logLcols=[logLcol
for f
in files])
5716 with open(posfilename,
'w')
as posfile:
5718 posfile.write(outpars)
5722 posfile.write(
'%10.12e\t' %(i))
5725 with open(posfilename,
'r')
as posfile:
5730 def _followupmcmc_to_pos(self,files):
5732 Parser for followupMCMC output.
5737 def _multinest_to_pos(self,files):
5739 Parser for MultiNest output.
5743 def _xml_to_pos(self,infile):
5745 Parser for VOTable XML Using
5747 from xml.etree
import ElementTree
as ET
5748 xmlns=
'http://www.ivoa.net/xml/VOTable/v1.1'
5750 register_namespace=ET.register_namespace
5751 except AttributeError:
5752 def register_namespace(prefix,uri):
5753 ET._namespace_map[uri]=prefix
5754 register_namespace(
'vot',xmlns)
5755 tree = ET.ElementTree()
5759 tables = tree.findall(
'.//{%s}TABLE'%(xmlns))
5760 for table
in tables:
5761 if table.get(
'utype')==
'lalinference:results:posteriorsamples':
5763 for table
in tables:
5764 if table.get(
'utype')==
'lalinference:results:nestedsamples':
5765 nsresource=[node
for node
in tree.findall(
'{%s}RESOURCE'%(xmlns))
if node.get(
'utype')==
'lalinference:results'][0]
5767 raise RuntimeError(
'Cannot find "Posterior Samples" TABLE element in XML input file %s'%(infile))
5769 def _VOTTABLE2pos(self,table):
5771 Parser for a VOT TABLE element
with FIELDs
and TABLEDATA elements
5773 from xml.etree
import ElementTree
as ET
5774 xmlns=
'http://www.ivoa.net/xml/VOTable/v1.1'
5776 register_namespace=ET.register_namespace
5777 except AttributeError:
5778 def register_namespace(prefix,uri):
5779 ET._namespace_map[uri]=prefix
5780 register_namespace(
'vot',xmlns)
5783 for field
in table.findall(
'./{%s}FIELD'%(xmlns)):
5784 header.append(field.attrib[
'name'])
5786 raise RuntimeError(
'Unable to find FIELD nodes for table headers in XML table')
5787 data=table.findall(
'./{%s}DATA'%(xmlns))
5788 tabledata=data[0].find(
'./{%s}TABLEDATA'%(xmlns))
5790 for row
in tabledata:
5791 llines.append(np.array(list(map(
lambda a:float(a.text),row))))
5792 flines=np.array(llines)
5793 for i
in range(0,len(header)):
5794 if header[i].lower().find(
'log')!=-1
and header[i].lower()
not in logParams
and re.sub(
'log',
'', header[i].lower())
not in [h.lower()
for h
in header]
and header[i].lower()
not in lorentzInvarianceViolationParams:
5795 print(
'exponentiating %s'%(header[i]))
5797 flines[:,i]=np.exp(flines[:,i])
5799 header[i]=re.sub(
'log',
'', header[i], flags=re.IGNORECASE)
5800 if header[i].lower().find(
'sin')!=-1
and re.sub(
'sin',
'', header[i].lower())
not in [h.lower()
for h
in header]:
5801 print(
'asining %s'%(header[i]))
5802 flines[:,i]=np.arcsin(flines[:,i])
5803 header[i]=re.sub(
'sin',
'', header[i], flags=re.IGNORECASE)
5804 if header[i].lower().find(
'cos')!=-1
and re.sub(
'cos',
'', header[i].lower())
not in [h.lower()
for h
in header]:
5805 print(
'acosing %s'%(header[i]))
5806 flines[:,i]=np.arccos(flines[:,i])
5807 header[i]=re.sub(
'cos',
'', header[i], flags=re.IGNORECASE)
5808 header[i]=header[i].replace(
'(',
'')
5809 header[i]=header[i].replace(
')',
'')
5810 print(
'Read columns %s'%(
str(header)))
5811 return header,flines
5813 def _hdf5s_to_pos(self, infiles, fixedBurnins=None, deltaLogP=None, nDownsample=None, tablename=None, **kwargs):
5814 from astropy.table
import vstack
5816 if fixedBurnins
is None:
5817 fixedBurnins = np.zeros(len(infiles))
5819 if len(infiles) > 1:
5820 multiple_chains =
True
5823 for i, [infile, fixedBurnin]
in enumerate(zip(infiles, fixedBurnins)):
5824 chain = self.
_hdf5_to_table(infile, fixedBurnin=fixedBurnin, deltaLogP=deltaLogP, nDownsample=nDownsample, multiple_chains=multiple_chains, tablename=tablename, **kwargs)
5825 chain.add_column(astropy.table.Column(i*np.ones(len(chain)), name=
'chain'))
5826 chains.append(chain)
5829 if deltaLogP
is not None:
5830 logPThreshold = -np.inf
5831 for chain
in chains:
5833 logPThreshold =
max([logPThreshold,
max(chain[
'logpost'])- deltaLogP])
5834 print(
"Eliminating any samples before log(L) = {}".format(logPThreshold))
5836 for i, chain
in enumerate(chains):
5837 if deltaLogP
is not None:
5838 above_threshold = np.arange(len(chain))[chain[
'logpost'] > logPThreshold]
5839 burnin_idx = above_threshold[0]
if len(above_threshold) > 0
else len(chain)
5842 chains[i] = chain[burnin_idx:]
5844 samples = vstack(chains)
5847 if nDownsample
is not None:
5849 samples = samples[::nskip]
5851 return samples.colnames,
as_array(samples).view(float).reshape(-1, len(samples.columns))
5853 def _hdf5_to_table(self, infile, deltaLogP=None, fixedBurnin=None, nDownsample=None, multiple_chains=False, tablename=None, **kwargs):
5855 Parse a HDF5 file and return an array of posterior samples ad list of
5856 parameter names. Equivalent to
'_common_to_pos' and work
in progress.
5859 samples =
read_samples(infile, tablename=posterior_grp_name)
5862 params = samples.colnames
5864 for param
in params:
5865 param_low = param.lower()
5866 if param_low.find(
'log') != -1
and param_low
not in logParams
and re.sub(
'log',
'', param_low)
not in [p.lower()
for p
in params]
and param_low
not in lorentzInvarianceViolationParams:
5867 print(
'exponentiating %s' % param)
5868 new_param = re.sub(
'log',
'', param, flags=re.IGNORECASE)
5869 samples[new_param] = np.exp(samples[param])
5872 if param_low.find(
'sin') != -1
and re.sub(
'sin',
'', param_low)
not in [p.lower()
for p
in params]:
5873 print(
'asining %s' % param)
5874 new_param = re.sub(
'sin',
'', param, flags=re.IGNORECASE)
5875 samples[new_param] = np.arcsin(samples[param])
5878 if param_low.find(
'cos') != -1
and re.sub(
'cos',
'', param_low)
not in [p.lower()
for p
in params]:
5879 print(
'acosing %s' % param)
5880 new_param = re.sub(
'cos',
'', param, flags=re.IGNORECASE)
5881 samples[new_param] = np.arccos(samples[param])
5885 if param != param.replace(
'(',
''):
5886 samples.rename_column(param, param.replace(
'(',
''))
5887 if param != param.replace(
')',
''):
5888 samples.rename_column(param, param.replace(
')',
''))
5893 params = samples.colnames
5894 print(
'Read columns %s' %
str(params))
5897 if 'cycle' in params:
5898 if not (fixedBurnin
is None):
5899 if not (deltaLogP
is None):
5900 print(
"Warning: using deltaLogP criteria in addition to fixed burnin")
5901 print(
"Fixed burning criteria: ",fixedBurnin)
5905 burned_in_cycles = np.arange(len(samples))[samples[
'cycle'] > fixedBurnin]
5906 burnin_idx = burned_in_cycles[0]
if len(burned_in_cycles) > 0
else len(samples)
5907 samples = samples[burnin_idx:]
5909 logPThreshold=-np.inf
5910 if len(samples) > 0
and not (deltaLogP
is None):
5911 logPThreshold =
max(samples[
'logpost'])- deltaLogP
5912 print(
"Eliminating any samples before log(post) = ", logPThreshold)
5913 burnin_idx = np.arange(len(samples))[samples[
'logpost'] > logPThreshold][0]
5914 samples = samples[burnin_idx:]
5916 if len(samples) > 0:
5918 if nDownsample
is None:
5919 print(
"Downsampling to take only uncorrelated posterior samples from each file.")
5920 if np.isnan(nskip)
and not multiple_chains:
5921 print(
"WARNING: All samples in chain are correlated. Downsampling to 10000 samples for inspection!!!")
5923 samples = samples[::nskip]
5926 print(
"WARNING: All samples in {} are correlated.".format(infile))
5927 samples = samples[-1:]
5929 print(
"Downsampling by a factor of ", nskip,
" to achieve approximately ", nDownsample,
" posterior samples")
5930 samples = samples[::nskip]
def _hdf5_to_pos(self, infile, fixedBurnins=None, deltaLogP=None, nDownsample=None, tablename=None, **kwargs):
    """Parse one HDF5 posterior file.

    Returns a pair (column names, 2-D float array with one row per
    sample), delegating burn-in removal and downsampling to
    ``_hdf5_to_table``.
    """
    table = self._hdf5_to_table(infile, fixedBurnin=fixedBurnins,
                                deltaLogP=deltaLogP, nDownsample=nDownsample,
                                tablename=tablename, **kwargs)
    flat = as_array(table).view(float).reshape(-1, len(table.columns))
    return table.colnames, flat
5939 def _common_to_pos(self,infile,info=[None,None]):
5941 Parse a file in the
'common format' and return an array of posterior
5942 samples
and list of parameter names. Will apply inverse functions to
5943 columns
with names containing sin,cos,log.
5946 [headerfile,delimiter]=info
5948 if headerfile==
None:
5949 formatstr=infile.readline().lstrip()
5951 hf=open(headerfile,
'r')
5952 formatstr=hf.readline().lstrip()
5955 formatstr=formatstr.replace(
'#',
'')
5956 formatstr=formatstr.replace(
'"',
'')
5958 header=formatstr.split(delimiter)
5959 header[-1]=header[-1].rstrip(
'\n')
5962 dec=re.compile(
r'^[-+]?[0-9]*\.?[0-9]+([eE][-+]?[0-9]+)?$|^inf$')
5964 for line_number,line
in enumerate(infile):
5965 sline=line.split(delimiter)
5966 if sline[-1] ==
'\n':
5970 print(
'Ignoring empty line in input file: %s'%(sline))
5972 elif len(sline)!=nparams:
5973 sys.stderr.write(
'WARNING: Malformed row %i, read %i elements but there is meant to be %i\n'%(line_number,len(sline),nparams))
5976 for elemn,st
in enumerate(sline):
5977 s=st.replace(
'\n',
'')
5978 if dec.search(s)
is None:
5979 print(
'Warning! Ignoring non-numeric data after the header: %s. Row = %i,Element=%i'%(s,line_number,elemn))
5985 llines.append(list(map(float,sline)))
5987 flines=np.array(llines)
5989 if not flines.any():
5990 raise RuntimeError(
"ERROR: no lines read in!")
5993 for i
in range(0,len(header)):
5994 if header[i].lower().find(
'log')!=-1
and header[i].lower()
not in logParams
and re.sub(
'log',
'', header[i].lower())
not in [h.lower()
for h
in header]
and header[i].lower()
not in lorentzInvarianceViolationParams:
5995 print(
'exponentiating %s'%(header[i]))
5997 flines[:,i]=np.exp(flines[:,i])
5999 header[i]=re.sub(
'log',
'', header[i], flags=re.IGNORECASE)
6000 if header[i].lower().find(
'sin')!=-1
and re.sub(
'sin',
'', header[i].lower())
not in [h.lower()
for h
in header]:
6001 print(
'asining %s'%(header[i]))
6002 flines[:,i]=np.arcsin(flines[:,i])
6003 header[i]=re.sub(
'sin',
'', header[i], flags=re.IGNORECASE)
6004 if header[i].lower().find(
'cos')!=-1
and re.sub(
'cos',
'', header[i].lower())
not in [h.lower()
for h
in header]:
6005 print(
'acosing %s'%(header[i]))
6006 flines[:,i]=np.arccos(flines[:,i])
6007 header[i]=re.sub(
'cos',
'', header[i], flags=re.IGNORECASE)
6008 header[i]=header[i].replace(
'(',
'')
6009 header[i]=header[i].replace(
')',
'')
6010 print(
'Read columns %s'%(
str(header)))
6011 return header,flines
6016 lines=fo.split(
'\n')
6021 key=line.replace(
'[1]',
'').strip(
' ').strip(
'"')
6025 if result
is not {}:
6031 key=line.strip(
'"').split()[1]
6036 newline=line.strip(
'"').split()
6037 if newline
is not []:
6038 out2.append(line.strip(
'"').split())
6047 Parse a VO Table RESOURCE containing nested sampling output and
6048 return a VOTable TABLE element
with posterior samples
in it.
6049 This can be added to an existing tree by the user.
6050 Nlive will be read
from the nsresource, unless specified
6052 from xml.etree
import ElementTree
as ET
6054 from math
import log, exp
6055 xmlns=
'http://www.ivoa.net/xml/VOTable/v1.1'
6057 register_namespace=ET.register_namespace
6058 except AttributeError:
6059 def register_namespace(prefix,uri):
6060 ET._namespace_map[uri]=prefix
6061 register_namespace(
'vot',xmlns)
6063 postable=ET.Element(
"{%s}TABLE"%(xmlns),attrib={
'name':
'Posterior Samples',
'utype':
'lalinference:results:posteriorsamples'})
6065 nstables=[resource
for resource
in nsresource.findall(
"./{%s}TABLE"%(xmlns))
if resource.get(
"utype")==
"lalinference:results:nestedsamples"]
6069 runstateResource = [resource
for resource
in nsresource.findall(
"./{%s}RESOURCE"%(xmlns))
if resource.get(
"utype")==
"lalinference:state"][0]
6070 algTable = [table
for table
in runstateResource.findall(
"./{%s}TABLE"%(xmlns))
if table.get(
"utype")==
"lalinference:state:algorithmparams"][0]
6071 Nlive = int ([param
for param
in algTable.findall(
"./{%s}PARAM"%(xmlns))
if param.get(
"name")==
'Nlive'][0].get(
'value'))
6072 print(
'Found Nlive %i'%(Nlive))
6074 raise RuntimeError(
"Cannot find number of live points in XML table, please specify")
6076 for fieldnode
in nstable.findall(
'./{%s}FIELD'%xmlns):
6077 if fieldnode.get(
'name') ==
'logL':
6080 postable.append(copy.deepcopy(fieldnode))
6081 for paramnode
in nstable.findall(
'./{%s}PARAM'%(xmlns)):
6082 postable.append(copy.deepcopy(paramnode))
6084 RuntimeError(
"Unable to find logL column")
6085 posdataNode=ET.Element(
"{%s}DATA"%(xmlns))
6086 postabledataNode=ET.Element(
"{%s}TABLEDATA"%(xmlns))
6087 postable.append(posdataNode)
6088 posdataNode.append(postabledataNode)
6089 nstabledata=nstable.find(
'./{%s}DATA/{%s}TABLEDATA'%(xmlns,xmlns))
6090 logw=log(1.0 - exp(-1.0/float(Nlive)))
6092 for row
in nstabledata:
6093 logL=float(row[logLcol].text)
6094 weights.append(logL-logw)
6095 logw=logw-1.0/float(Nlive)
6097 weights = [w - mw
for w
in weights]
6098 for (row,weight)
in zip(nstabledata,weights):
6099 if weight > log(random.random()):
6100 postabledataNode.append(copy.deepcopy(row))
6103xmlns=
'http://www.ivoa.net/xml/VOTable/v1.1'
6111 if tag==
'{%s}TABLE'%(xmlns):
6112 if attrib[
'utype']==
'lalinference:results:nestedsamples'\
6113 or attrib[
'utype']==
'lalinference:results:posteriorsamples':
6126 if tag==
'{%s}FIELD'%(xmlns):
6128 if tag==
'{%s}TR'%(xmlns):
6130 if tag==
'{%s}TD'%(xmlns):
6132 if tag==
'{%s}PARAM'%(xmlns):
6135 namenode.p(attrib[
'name'])
6137 valnode.p(attrib[
'value'])
6140 if tag==
'{%s}TABLE'%(xmlns):
6143 if tag==
'{%s}FIELD'%(xmlns):
6145 if tag==
'{%s}TD'%(xmlns):
6152 return self.
html.toprettyxml()
6154def _cl_width(cl_bound):
6155 """Returns (high - low), the width of the given confidence
6158 return cl_bound[1] - cl_bound[0]
6160def _cl_count(cl_bound, samples):
6161 """Returns the number of samples within the given confidence
6164 return np.sum((samples >= cl_bound[0]) & (samples <= cl_bound[1]))
6167 """Returns a tuple (relative_change, fractional_uncertainty,
6168 percentile_uncertainty) giving the uncertainty in confidence
6169 intervals
from multiple posteriors.
6171 The uncertainty
in the confidence intervals
is the difference
in
6172 length between the widest interval, formed
from the smallest to
6173 largest values among all the cl_bounds,
and the narrowest
6174 interval, formed
from the largest-small
and smallest-large values
6175 among all the cl_bounds. Note that neither the smallest nor the
6176 largest confidence intervals necessarily correspond to one of the
6179 The relative change relates the confidence interval uncertainty to
6180 the expected value of the parameter, the fractional uncertainty
6181 relates this length to the length of the confidence level
from the
6182 combined posteriors,
and the percentile uncertainty gives the
6183 change
in percentile over the combined posterior between the
6184 smallest
and largest confidence intervals.
6186 @param cl The confidence level (between 0
and 1).
6188 @param cl_bounds A list of (low, high) pairs giving the confidence
6189 interval associated
with each posterior.
6191 @param posteriors A list of PosteriorOneDPDF objects giving the
6194 Ns=[p.samples.shape[0] for p
in posteriors]
6199 all_samples = np.squeeze(np.concatenate([p.samples
for p
in posteriors], axis=0))
6200 weights = np.squeeze(np.concatenate([p.samples*0.0+1.0/(Nsamplers*N)
for (N,p)
in zip(Ns,posteriors)], axis=0))
6202 isort=np.argsort(all_samples)
6204 all_samples = all_samples[isort]
6205 weights = weights[isort]
6207 param_mean = np.average(all_samples, weights=weights)
6209 N=all_samples.shape[0]
6211 alpha = (1.0 - cl)/2.0
6213 wttotal = np.cumsum(weights)
6214 ilow = np.nonzero(wttotal >= alpha)[0][0]
6215 ihigh = np.nonzero(wttotal >= 1.0-alpha)[0][0]
6217 all_cl_bound = (all_samples[ilow], all_samples[ihigh])
6219 low_bounds = np.array([l
for (l,h)
in cl_bounds])
6220 high_bounds = np.array([h
for (l,h)
in cl_bounds])
6222 largest_cl_bound = (np.min(low_bounds), np.max(high_bounds))
6223 smallest_cl_bound = (np.max(low_bounds), np.min(high_bounds))
6225 if smallest_cl_bound[1] < smallest_cl_bound[0]:
6227 smallest_cl_bound = (0.0, 0.0)
6229 ci_uncertainty = _cl_width(largest_cl_bound) - _cl_width(smallest_cl_bound)
6231 relative_change = ci_uncertainty/param_mean
6233 frac_uncertainty = ci_uncertainty/_cl_width(all_cl_bound)
6235 quant_uncertainty = float(_cl_count(largest_cl_bound, all_samples) - _cl_count(smallest_cl_bound, all_samples))/float(N)
6237 return (relative_change, frac_uncertainty, quant_uncertainty)
6240def plot_waveform(pos=None,siminspiral=None,event=0,path=None,ifos=['H1','L1','V1']):
6242 from igwn_ligolw
import lsctables,ligolw
6243 from lalsimulation
import SimInspiralChooseTDWaveform,SimInspiralChooseFDWaveform
6244 from lalsimulation
import SimInspiralImplementedTDApproximants,SimInspiralImplementedFDApproximants
6245 from lal
import CreateREAL8TimeSeries,CreateForwardREAL8FFTPlan,CreateTukeyREAL8Window,CreateCOMPLEX16FrequencySeries,DimensionlessUnit,REAL8TimeFreqFFT
6246 from lal
import ComputeDetAMResponse, GreenwichMeanSiderealTime
6247 from lal
import LIGOTimeGPS
6248 from lal
import MSUN_SI
as LAL_MSUN_SI
6249 from lal
import PC_SI
as LAL_PC_SI
6250 import lalsimulation
as lalsim
6251 from math
import cos,sin,sqrt
6252 from igwn_ligolw
import utils
6255 from numpy
import arange
6260 colors_inj={
'H1':
'r',
'L1':
'g',
'V1':
'm',
'I1':
'b',
'J1':
'y'}
6261 colors_rec={
'H1':
'k',
'L1':
'k',
'V1':
'k',
'I1':
'k',
'J1':
'k'}
6267 deltaF = 1.0 / (length* deltaT);
6271 timeToFreqFFTPlan = CreateForwardREAL8FFTPlan(
int(length), 1 );
6272 window=CreateTukeyREAL8Window(
int(length),2.0*pad*srate/length);
6273 WinNorm = sqrt(window.sumofsquares/window.data.length);
6276 strainT=CreateREAL8TimeSeries(
"strainT",segStart,0.0,1.0/srate,DimensionlessUnit,
int(length));
6277 strainF= CreateCOMPLEX16FrequencySeries(
"strainF",segStart, 0.0, deltaF, DimensionlessUnit,
int(length/2. +1));
6284 inj_strains=dict((i,{
"T":{
'x':
None,
'strain':
None},
"F":{
'x':
None,
'strain':
None}})
for i
in ifos)
6285 rec_strains=dict((i,{
"T":{
'x':
None,
'strain':
None},
"F":{
'x':
None,
'strain':
None}})
for i
in ifos)
6290 if siminspiral
is not None:
6293 xmldoc = utils.load_filename(siminspiral)
6294 tbl = lsctables.SimInspiralTable.get_table(xmldoc)
6300 e = sys.exc_info()[0]
6302 print(
"Cannot read event %s from table %s. Won't plot injected waveform \n"%(event,siminspiral))
6305 REAL8time=tbl.geocent_end_time+1e-9*tbl.geocent_end_time_ns
6306 GPStime=LIGOTimeGPS(REAL8time)
6312 phiRef=tbl.coa_phase
6323 iota=tbl.inclination
6324 print(
"WARNING: Defaulting to inj_fref =100Hz to plot the injected WF. This is hardcoded since xml table does not carry this information\n")
6328 wf=
str(tbl.waveform)
6330 injapproximant=lalsim.GetApproximantFromString(wf)
6331 amplitudeO=
int(tbl.amp_order )
6332 phaseO=lalsim.GetOrderFromString(wf)
6334 waveFlags=lal.CreateDict()
6335 lalsim.SimInspiralWaveformParamsInsertPNAmplitudeOrder(waveFlags, amplitudeO)
6336 lalsim.SimInspiralWaveformParamsInsertPNPhaseOrder(waveFlags, phaseO)
6340 psi=tbl.polarization
6342 if SimInspiralImplementedFDApproximants(injapproximant):
6344 [plus,cross]=SimInspiralChooseFDWaveform(m1, m2, s1x, s1y, s1z,s2x,s2y,s2z,r, iota, phiRef,
6346 deltaF, f_min, f_max, f_ref,
6347 waveFlags, injapproximant)
6348 elif SimInspiralImplementedTDApproximants(injapproximant):
6350 [plus,cross]=SimInspiralChooseTDWaveform(m1, m2, s1x, s1y, s1z,s2x,s2y,s2z, r, iota, phiRef,
6352 deltaT, f_min, f_ref,
6353 waveFlags, injapproximant)
6355 print(
"\nThe approximant %s doesn't seem to be recognized by lalsimulation!\n Skipping WF plots\n"%injapproximant)
6359 fp, fc = ComputeDetAMResponse(lal.cached_detector_by_prefix[ifo].response, ra, dec, psi, GreenwichMeanSiderealTime(REAL8time))
6363 for k
in np.arange(strainT.data.length):
6364 if k<plus.data.length:
6365 strainT.data.data[k]=((fp*plus.data.data[k]+fc*cross.data.data[k]))
6367 strainT.data.data[k]=0.0
6368 strainT.data.data[k]*=window.data.data[k]
6370 inj_strains[ifo][
"T"][
'strain']=np.array([strainT.data.data[k]
for k
in arange(plus.data.length)])
6371 inj_strains[ifo][
"T"][
'x']=np.array([REAL8time - deltaT*(plus.data.length-1-k)
for k
in np.arange(plus.data.length)])
6374 for j
in arange(strainF.data.length):
6375 strainF.data.data[j]=0.0
6376 REAL8TimeFreqFFT(strainF,strainT,timeToFreqFFTPlan);
6377 for j
in arange(strainF.data.length):
6378 strainF.data.data[j]/=WinNorm
6380 inj_strains[ifo][
"F"][
'strain']=np.array([strainF.data.data[k]
for k
in arange(
int(strainF.data.length))])
6381 inj_strains[ifo][
"F"][
'x']=np.array([strainF.f0+ k*strainF.deltaF
for k
in arange(
int(strainF.data.length))])
6382 elif inj_domain==
'F':
6383 for k
in np.arange(strainF.data.length):
6384 if k<plus.data.length:
6385 strainF.data.data[k]=((fp*plus.data.data[k]+fc*cross.data.data[k]))
6387 strainF.data.data[k]=0.0
6389 inj_strains[ifo][
"F"][
'strain']=np.array([strainF.data.data[k]
for k
in arange(
int(strainF.data.length))])
6390 inj_strains[ifo][
"F"][
'x']=np.array([strainF.f0+ k*strainF.deltaF
for k
in arange(
int(strainF.data.length))])
6394 _,which=pos._posMap()
6396 if 'time' in pos.names:
6397 REAL8time=pos[
'time'].samples[which][0]
6398 elif 'time_maxl' in pos.names:
6399 REAL8time=pos[
'time_maxl'].samples[which][0]
6400 elif 'time_min' in pos.names
and 'time_max' in pos.names:
6401 REAL8time=pos[
'time_min'].samples[which][0]+0.5*(pos[
'time_max'].samples[which][0]-pos[
'time_min'].samples[which][0])
6403 print(
"ERROR: could not find any time parameter in the posterior file. Not plotting the WF...\n")
6409 approximant=
int(pos[
'LAL_APPROXIMANT'].samples[which][0])
6410 amplitudeO=
int(pos[
'LAL_AMPORDER'].samples[which][0])
6411 phaseO=
int(pos[
'LAL_PNORDER'].samples[which][0])
6415 GPStime=LIGOTimeGPS(REAL8time)
6417 q=pos[
'q'].samples[which][0]
6418 mc=pos[
'mc'].samples[which][0]
6420 if 'dist' in pos.names:
6421 D=pos[
'dist'].samples[which][0]
6422 elif 'distance' in pos.names:
6423 D=pos[
'distance'].samples[which][0]
6424 elif 'logdistance' in pos.names:
6425 D=np.exp(pos[
'distance'].samples[which][0])
6429 if 'phi_orb' in pos.names:
6430 phiRef=pos[
'phi_orb'].samples[which][0]
6431 elif 'phase' in pos.names:
6432 phiRef=pos[
'phase'].samples[which][0]
6433 elif 'phase_maxl' in pos.names:
6434 phiRef=pos[
'phase_maxl'].samples[which][0]
6435 print(
'INFO: phi_orb not estimated, using maximum likelihood value')
6437 print(
'WARNING: phi_orb not found in posterior files. Defaulting to 0.0 which is probably *not* what you want\n')
6441 for name
in [
'flow',
'f_lower']:
6442 if name
in pos.names:
6443 f_min=pos[name].samples[which][0]
6448 for name
in [
'fref',
'f_ref',
'f_Ref',
'fRef']:
6449 if name
in pos.names:
6452 Fref = np.unique(pos[fname].samples)
6454 print(
"ERROR: Expected f_ref to be constant for all samples. Can't tell which value was injected! Defaulting to 100 Hz\n")
6459 print(
"WARNING: Could not read fref from posterior file! Defaulting to 100 Hz\n")
6462 a = pos[
'a1'].samples[which][0]
6463 the = pos[
'theta_spin1'].samples[which][0]
6464 phi = pos[
'phi_spin1'].samples[which][0]
6465 s1x = (a * sin(the) * cos(phi));
6466 s1y = (a * sin(the) * sin(phi));
6467 s1z = (a * cos(the));
6468 a = pos[
'a2'].samples[which][0]
6469 the = pos[
'theta_spin2'].samples[which][0]
6470 phi = pos[
'phi_spin2'].samples[which][0]
6471 s2x = (a * sin(the) * cos(phi));
6472 s2y = (a * sin(the) * sin(phi));
6473 s2z = (a * cos(the));
6474 iota=pos[
'inclination'].samples[which][0]
6477 iota, s1x, s1y, s1z, s2x, s2y, s2z=lalsim.SimInspiralTransformPrecessingNewInitialConditions(pos[
'theta_jn'].samples[which][0], pos[
'phi_JL'].samples[which][0], pos[
'tilt1'].samples[which][0], pos[
'tilt2'].samples[which][0], pos[
'phi12'].samples[which][0], pos[
'a1'].samples[which][0], pos[
'a2'].samples[which][0], m1, m2, f_ref, phiRef)
6479 if 'a1z' in pos.names:
6480 s1z=pos[
'a1z'].samples[which][0]
6481 elif 'a1' in pos.names:
6482 s1z=pos[
'a1'].samples[which][0]
6485 if 'a2z' in pos.names:
6486 s2z=pos[
'a2z'].samples[which][0]
6487 elif 'a2' in pos.names:
6488 s2z=pos[
'a2'].samples[which][0]
6492 if 'inclination' in pos.names:
6493 iota=pos[
'inclination'].samples[which][0]
6495 iota=pos[
'theta_jn'].samples[which][0]
6499 approximant=
int(pos[
'LAL_APPROXIMANT'].samples[which][0])
6500 amplitudeO=
int(pos[
'LAL_AMPORDER'].samples[which][0])
6501 phaseO=
int(pos[
'LAL_PNORDER'].samples[which][0])
6503 waveFlags=lal.CreateDict()
6504 lalsim.SimInspiralWaveformParamsInsertPNAmplitudeOrder(waveFlags, amplitudeO)
6505 lalsim.SimInspiralWaveformParamsInsertPNPhaseOrder(waveFlags, phaseO)
6506 if 'tideO' in pos.names:
6507 tidalO=
int(pos[
'tideO'].samples[which][0])
6508 lalsim.SimInspiralWaveformParamsInsertPNTidalOrder(waveFlags, tidalO)
6509 if 'spinO' in pos.names:
6510 spinO=
int(pos[
'spinO'].samples[which][0])
6511 lalsim.SimInspiralWaveformParamsInsertPNSpinOrder(waveFlags, spinO)
6512 if 'lambda1' in pos.names:
6513 lalsim.SimInspiralWaveformParamsInsertTidalLambda1(waveFlags, pos[
'lambda1'].samples[which][0])
6514 if 'lambda2' in pos.names:
6515 lalsim.SimInspiralWaveformParamsInsertTidalLambda2(waveFlags, pos[
'lambda2'].samples[which][0])
6517 if SimInspiralImplementedFDApproximants(approximant):
6519 [plus,cross]=SimInspiralChooseFDWaveform(m1, m2, s1x, s1y, s1z,s2x,s2y,s2z,r, iota, phiRef,
6521 deltaF, f_min, f_max, f_ref,
6522 waveFlags, approximant)
6523 elif SimInspiralImplementedTDApproximants(approximant):
6525 [plus,cross]=SimInspiralChooseTDWaveform(m1, m2, s1x, s1y, s1z,s2x,s2y,s2z, r, iota, phiRef,
6527 deltaT, f_min, f_ref,
6528 waveFlags, approximant)
6530 print(
"The approximant %s doesn't seem to be recognized by lalsimulation!\n Skipping WF plots\n"%approximant)
6533 ra=pos[
'ra'].samples[which][0]
6534 dec=pos[
'dec'].samples[which][0]
6535 psi=pos[
'psi'].samples[which][0]
6538 fp, fc = ComputeDetAMResponse(lal.cached_detector_by_prefix[ifo].response, ra, dec, psi, GreenwichMeanSiderealTime(REAL8time))
6542 for k
in np.arange(strainT.data.length):
6543 if k<plus.data.length:
6544 strainT.data.data[k]=((fp*plus.data.data[k]+fc*cross.data.data[k]))
6546 strainT.data.data[k]=0.0
6547 strainT.data.data[k]*=window.data.data[k]
6549 rec_strains[ifo][
"T"][
'strain']=np.array([strainT.data.data[k]
for k
in arange(plus.data.length)])
6550 rec_strains[ifo][
"T"][
'x']=np.array([REAL8time - deltaT*(plus.data.length-1-k)
for k
in np.arange(plus.data.length)])
6553 for j
in arange(strainF.data.length):
6554 strainF.data.data[j]=0.0
6555 REAL8TimeFreqFFT(strainF,strainT,timeToFreqFFTPlan);
6556 for j
in arange(strainF.data.length):
6557 strainF.data.data[j]/=WinNorm
6559 rec_strains[ifo][
"F"][
'strain']=np.array([strainF.data.data[k]
for k
in arange(
int(strainF.data.length))])
6560 rec_strains[ifo][
"F"][
'x']=np.array([strainF.f0+ k*strainF.deltaF
for k
in arange(
int(strainF.data.length))])
6561 elif rec_domain==
'F':
6562 for k
in np.arange(strainF.data.length):
6563 if k<plus.data.length:
6564 strainF.data.data[k]=((fp*plus.data.data[k]+fc*cross.data.data[k]))
6566 strainF.data.data[k]=0.0
6568 rec_strains[ifo][
"F"][
'strain']=np.array([strainF.data.data[k]
for k
in arange(
int(strainF.data.length))])
6569 rec_strains[ifo][
"F"][
'x']=np.array([strainF.f0+ k*strainF.deltaF
for k
in arange(
int(strainF.data.length))])
6571 myfig=plt.figure(1,figsize=(23,15))
6579 if rec_domain
is not None and inj_domain
is not None:
6580 if rec_domain==
"T" and inj_domain==
"T":
6582 elif rec_domain
is not None:
6585 elif inj_domain
is not None:
6589 A,axes=plt.subplots(nrows=rows,ncols=cols,sharex=
False,sharey=
False)
6590 plt.setp(A,figwidth=23,figheight=15)
6591 for (r,i)
in zip(np.arange(rows),ifos):
6592 for c
in np.arange(cols):
6594 if type(ax)==np.ndarray:
6598 if rec_strains[i][
"T"][
'strain']
is not None or rec_strains[i][
"F"][
'strain']
is not None:
6600 if global_domain==
"T":
6601 ax.plot(rec_strains[i][
"T"][
'x'],rec_strains[i][
"T"][
'strain'],colors_rec[i],alpha=0.5,label=
'%s maP'%i)
6603 data=rec_strains[i][
"F"][
'strain']
6604 f=rec_strains[i][
"F"][
'x']
6605 mask=np.logical_and(f>=f_min,f<=plot_fmax)
6607 ax.semilogx(f[mask],ys[mask].real,
'.-',color=colors_rec[i],alpha=0.5,label=
'%s maP'%i)
6609 data=rec_strains[i][
"F"][
'strain']
6610 f=rec_strains[i][
"F"][
'x']
6611 mask=np.logical_and(f>=f_min,f<=plot_fmax)
6613 ax.loglog(f[mask],abs(ys[mask]),
'--',color=colors_rec[i],alpha=0.5,linewidth=4)
6614 ax.set_xlim([min(f[mask]),
max(f[mask])])
6615 ax.grid(
True,which=
'both')
6616 if inj_strains[i][
"T"][
'strain']
is not None or inj_strains[i][
"F"][
'strain']
is not None:
6618 if global_domain==
"T":
6619 ax.plot(inj_strains[i][
"T"][
'x'],inj_strains[i][
"T"][
'strain'],colors_inj[i],alpha=0.5,label=
'%s inj'%i)
6621 data=inj_strains[i][
"F"][
'strain']
6622 f=inj_strains[i][
"F"][
'x']
6623 mask=np.logical_and(f>=f_min,f<=plot_fmax)
6625 ax.plot(f[mask],ys[mask].real,
'.-',color=colors_inj[i],alpha=0.5,label=
'%s inj'%i)
6627 data=inj_strains[i][
"F"][
'strain']
6628 f=inj_strains[i][
"F"][
'x']
6629 mask=np.logical_and(f>=f_min,f<=plot_fmax)
6631 ax.loglog(f[mask],abs(ys[mask]),
'--',color=colors_inj[i],alpha=0.5,linewidth=4)
6632 ax.set_xlim([min(f[mask]),
max(f[mask])])
6633 ax.grid(
True,which=
'both')
6637 if global_domain==
"T":
6638 ax.set_title(
r"$h(t)$",fontsize=font_size)
6640 ax.set_title(
r"$\Re[h(f)]$",fontsize=font_size)
6642 ax.set_title(
r"$|h(f)|$",fontsize=font_size)
6645 if global_domain==
"T":
6646 ax.set_xlabel(
"time [s]",fontsize=font_size)
6648 ax.set_xlabel(
"frequency [Hz]",fontsize=font_size)
6650 ax.set_xlabel(
"frequency [Hz]",fontsize=font_size)
6652 ax.legend(loc=
'best')
6656 A.savefig(os.path.join(path,
'WF_DetFrame.png'),bbox_inches=
'tight')
6657 return inj_strains,rec_strains
6661 myfig2=plt.figure(figsize=(15,15),dpi=500)
6662 ax=plt.subplot(1,1,1)
6663 colors={
'H1':
'r',
'L1':
'g',
'V1':
'm',
'I1':
'k',
'J1':
'y'}
6669 if not os.path.isfile(f):
6670 print(
"PSD file %s has not been found and won't be plotted\n"%f)
6683 idx=f.find(
'-PSD.dat')
6685 freqs[ifo.lower()] = freq
6688 for (f,d)
in zip(freq,data):
6689 if f>f_min
and d!=0.0
and np.isfinite(d):
6692 plt.loglog(fr,da,colors[ifo],label=ifo,alpha=0.5,linewidth=3)
6693 plt.xlim([min(fr),
max(fr)])
6694 plt.xlabel(
"Frequency [Hz]",fontsize=26)
6695 plt.ylabel(
"PSD",fontsize=26)
6696 plt.legend(loc=
'best')
6697 plt.grid(which=
'both')
6700 myfig2.savefig(os.path.join(outpath,
'PSD.png'),bbox_inches=
'tight')
6702 myfig2.savefig(os.path.join(outpath,
'PSD.png'))
def cred_level(cl, x):
    """Return the sample at credible level ``cl`` of the sorted samples ``x``.

    Sorts ``x`` along axis 0 and indexes at ``int(cl*len(x))``, exactly as
    the original lambda did (PEP 8 discourages binding a lambda to a name,
    so this is now a ``def`` with identical behavior).

    NOTE(review): for ``cl == 1`` the index equals ``len(x)`` and raises
    IndexError — unchanged from the original.

    @param cl Credible level in [0, 1).
    @param x Array (or sequence) of samples.
    """
    return np.sort(x, axis=0)[int(cl * len(x))]
6710 """Return location of lower or upper confidence levels
6713 cl: Confidence level to return the bound of.
6714 lower: If ``
True``,
return the lower bound, otherwise
return the upper bound.
6722 """Returns the angle in degrees corresponding to the spline
6723 calibration parameters delta_psi.
6726 rot = (2.0 + 1.0j*delta_psi)/(2.0 - 1.0j*delta_psi)
6728 return 180.0/np.pi*np.arctan2(np.imag(rot), np.real(rot))
6730def plot_spline_pos(logf, ys, nf=100, level=0.9, color='k', label=None, xform=None):
6731 """Plot calibration posterior estimates for a spline model in log space.
6733 logf: The (log) location of spline control points.
6734 ys: List of posterior draws of function at control points ``logf``
6735 nx: Number of points to evaluate spline at for plotting.
6736 level: Credible level to fill
in.
6737 color: Color to plot
with.
6738 label: Label
for plot.
6739 xform: Function to transform the spline into plotted values.
6742 fs = np.linspace(f.min(), f.max(), nf)
6744 data = np.zeros((ys.shape[0], nf))
6751 mu = np.mean(zs, axis=0)
6754 plt.errorbar(np.exp(logf), mu, yerr=[lower_cl, upper_cl], fmt=
'.', color=color, lw=4, alpha=0.5, capsize=0)
6756 for i, samp
in enumerate(ys):
6758 temp = interpolate.spline(logf, samp, np.log(fs))
6759 except AttributeError:
6760 calSpline = interpolate.InterpolatedUnivariateSpline(logf, samp, k=3, ext=2)
6761 temp = calSpline(np.log(fs))
6765 data[i] = xform(temp)
6767 line, = plt.plot(fs, np.mean(data, axis=0), color=color, label=label)
6768 color = line.get_color()
6769 plt.fill_between(fs,
cred_interval(data, level),
cred_interval(data, level, lower=
False), color=color, alpha=.1, linewidth=0.1)
6770 plt.xlim(f.min()-.5, f.max()+50)
6773 fig, [ax1, ax2] = plt.subplots(2, 1, figsize=(15, 15), dpi=500)
6780 ifos = np.unique([param.split(
'_')[0]
for param
in params
if 'spcal_freq' in param])
6782 if ifo==
'h1': color =
'r'
6783 elif ifo==
'l1': color =
'g'
6784 elif ifo==
'v1': color =
'm'
6788 freq_params = np.sort([param
for param
in params
if
6789 '{0}_spcal_freq'.format(ifo)
in param])
6791 logfreqs = np.log([pos[param].median
for param
in freq_params])
6795 amp_params = np.sort([param
for param
in params
if
6796 '{0}_spcal_amp'.format(ifo)
in param])
6797 if len(amp_params) > 0:
6798 amp = 100*np.column_stack([pos[param].samples
for param
in amp_params])
6799 plot_spline_pos(logfreqs, amp, color=color, level=level, label=
"{0} (mean, {1}%)".format(ifo.upper(),
int(level*100)))
6803 phase_params = np.sort([param
for param
in params
if
6804 '{0}_spcal_phase'.format(ifo)
in param])
6805 if len(phase_params) > 0:
6806 phase = np.column_stack([pos[param].samples
for param
in phase_params])
6808 plot_spline_pos(logfreqs, phase, color=color, level=level, label=
"{0} (mean, {1}%)".format(ifo.upper(),
int(level*100)), xform=spline_angle_xform)
6810 ax1.tick_params(labelsize=.75*font_size)
6811 ax2.tick_params(labelsize=.75*font_size)
6813 plt.legend(loc=
'upper right', prop={
'size':.75*font_size}, framealpha=0.1)
6815 plt.legend(loc=
'upper right', prop={
'size':.75*font_size})
6816 ax1.set_xscale(
'log')
6817 ax2.set_xscale(
'log')
6819 ax2.set_xlabel(
'Frequency (Hz)', fontsize=font_size)
6820 ax1.set_ylabel(
'Amplitude (%)', fontsize=font_size)
6821 ax2.set_ylabel(
'Phase (deg)', fontsize=font_size)
6823 outp = os.path.join(outpath,
'calibration.png')
6826 fig.savefig(outp, bbox_inches=
'tight')
6833 from lalinference
import SimBurstChooseFDWaveform,SimBurstChooseTDWaveform
6834 from lalinference
import SimBurstImplementedFDApproximants,SimBurstImplementedTDApproximants
6835 from lal
import CreateREAL8TimeSeries,CreateForwardREAL8FFTPlan,CreateTukeyREAL8Window,CreateCOMPLEX16FrequencySeries,DimensionlessUnit,REAL8TimeFreqFFT,CreateReverseREAL8FFTPlan
6836 from lal
import LIGOTimeGPS
6837 import lalinference
as lalinf
6838 from lal
import ComputeDetAMResponse, GreenwichMeanSiderealTime, LIGOTimeGPS
6840 from math
import cos,sin,sqrt
6841 from igwn_ligolw
import lsctables
6842 from igwn_ligolw
import utils
6845 from numpy
import arange,real,absolute,fabs,pi
6846 from matplotlib
import pyplot
as plt
6851 colors_inj={
'H1':
'r',
'L1':
'g',
'V1':
'm',
'I1':
'b',
'J1':
'y'}
6852 colors_rec={
'H1':
'k',
'L1':
'k',
'V1':
'k',
'I1':
'k',
'J1':
'k'}
6854 from igwn_ligolw
import ligolw
6855 from igwn_ligolw
import table
6856 class LIGOLWContentHandlerExtractSimBurstTable(ligolw.LIGOLWContentHandler):
6857 def __init__(self,document):
6858 ligolw.LIGOLWContentHandler.__init__(self,document)
6859 self.tabname=lsctables.SimBurstTable.tableName
6861 self.tableElementName=
''
6862 def startElement(self,name,attrs):
6863 if attrs.has_key(
'Name')
and table.Table.TableName(attrs[
'Name'])==self.tabname:
6864 self.tableElementName=name
6866 ligolw.LIGOLWContentHandler.startElement(self,name,attrs)
6869 ligolw.LIGOLWContentHandler.startElement(self,name,attrs)
6870 def endElement(self,name):
6871 if self.intable: ligolw.LIGOLWContentHandler.endElement(self,name)
6872 if self.intable
and name==self.tableElementName: self.intable=
False
6879 deltaF = 1.0 / (length* deltaT);
6883 timeToFreqFFTPlan = CreateForwardREAL8FFTPlan(
int(length), 1 );
6884 freqToTimeFFTPlan = CreateReverseREAL8FFTPlan(
int(length), 1 );
6885 window=CreateTukeyREAL8Window(
int(length),2.0*pad*srate/length);
6887 segStart=939936910.000
6888 strainFinj= CreateCOMPLEX16FrequencySeries(
"strainF",segStart,0.0,deltaF,DimensionlessUnit,
int(length/2. +1));
6889 strainTinj=CreateREAL8TimeSeries(
"strainT",segStart,0.0,1.0/srate,DimensionlessUnit,
int(length));
6890 strainFrec= CreateCOMPLEX16FrequencySeries(
"strainF",segStart,0.0,deltaF,DimensionlessUnit,
int(length/2. +1));
6891 strainTrec=CreateREAL8TimeSeries(
"strainT",segStart,0.0,1.0/srate,DimensionlessUnit,
int(length));
6902 inj_strains=dict((i,{
"T":{
'x':
None,
'strain':
None},
"F":{
'x':
None,
'strain':
None}})
for i
in ifos)
6903 rec_strains=dict((i,{
"T":{
'x':
None,
'strain':
None},
"F":{
'x':
None,
'strain':
None}})
for i
in ifos)
6908 if simburst
is not None:
6911 xmldoc = utils.load_filename(simburst,contenthandler=LIGOLWContentHandlerExtractSimBurstTable)
6912 tbl = lsctables.SimBurstTable.get_table(xmldoc)
6918 print(
"Cannot read event %s from table %s. Won't plot injected waveform \n"%(event,simburst))
6921 REAL8time=tbl.time_geocent_gps+1e-9*tbl.time_geocent_gps_ns
6922 GPStime=LIGOTimeGPS(REAL8time)
6923 GlobREAL8time=(REAL8time)
6924 strainTinj.epoch=LIGOTimeGPS(round(GlobREAL8time,0)-seglen/2.)
6925 strainFinj.epoch=LIGOTimeGPS(round(GlobREAL8time,0)-seglen/2.)
6930 polar_e_angle=tbl.pol_ellipse_angle
6931 polar_e_ecc=tbl.pol_ellipse_e
6933 BurstExtraParams=
None
6934 wf=
str(tbl.waveform)
6936 injapproximant=lalinf.GetBurstApproximantFromString(wf)
6941 if SimBurstImplementedFDApproximants(injapproximant):
6943 [plus,cross]=SimBurstChooseFDWaveform(deltaF, deltaT, f0, q,dur, f_min, f_max,hrss,polar_e_angle ,polar_e_ecc,BurstExtraParams, injapproximant)
6944 elif SimBurstImplementedTDApproximants(injapproximant):
6946 [plus,cross]=SimBurstChooseTDWaveform(deltaT, f0, q,dur, f_min, f_max,hrss,polar_e_angle ,polar_e_ecc,BurstExtraParams, injapproximant)
6948 print(
"\nThe approximant %s doesn't seem to be recognized by lalinference!\n Skipping WF plots\n"%injapproximant)
6952 fp, fc = ComputeDetAMResponse(lal.cached_detector_by_prefix[ifo].response, ra, dec, psi, GreenwichMeanSiderealTime(REAL8time))
6955 tCinstrain=np.floor(REAL8time-float(strainTinj.epoch))/deltaT
6958 tSinstrain=
int( (REAL8time-fabs(float(plus.epoch)) - fabs(float(strainTinj.epoch)))/deltaT)
6959 rem=(REAL8time-fabs(float(plus.epoch)) - fabs(float(strainTinj.epoch)))/deltaT-tSinstrain
6962 for k
in np.arange(tSinstrain):
6963 strainTinj.data.data[k]=0.0
6965 for k
in np.arange(plus.data.length):
6966 strainTinj.data.data[k+tSinstrain]=((fp*plus.data.data[k]+fc*cross.data.data[k]))
6968 for k
in np.arange(strainTinj.data.length- (tSinstrain +plus.data.length)):
6969 strainTinj.data.data[k+tSinstrain+plus.data.length]=0.0
6970 for k
in np.arange(strainTinj.data.length):
6971 strainTinj.data.data[k]*=window.data.data[k]
6972 np.savetxt(
'file.out',zip(np.array([strainTinj.epoch + j*deltaT
for j
in arange(strainTinj.data.length)]),np.array([strainTinj.data.data[j]
for j
in arange(strainTinj.data.length)])))
6974 inj_strains[ifo][
"T"][
'strain']=np.array([strainTinj.data.data[j]
for j
in arange(strainTinj.data.length)])
6975 inj_strains[ifo][
"T"][
'x']=np.array([strainTinj.epoch + j*deltaT
for j
in arange(strainTinj.data.length)])
6977 for j
in arange(strainFinj.data.length):
6978 strainFinj.data.data[j]=0.0
6979 REAL8TimeFreqFFT(strainFinj,strainTinj,timeToFreqFFTPlan);
6980 twopit=2.*np.pi*(rem*deltaT)
6981 for k
in arange(strainFinj.data.length):
6982 re = cos(twopit*deltaF*k)
6983 im = -sin(twopit*deltaF*k)
6984 strainFinj.data.data[k]*= (re + 1j*im);
6986 inj_strains[ifo][
"F"][
'strain']=np.array([strainFinj.data.data[k]
for k
in arange(
int(strainFinj.data.length))])
6987 inj_strains[ifo][
"F"][
'x']=np.array([strainFinj.f0+ k*strainFinj.deltaF
for k
in arange(
int(strainFinj.data.length))])
6988 elif inj_domain==
'F':
6989 for k
in np.arange(strainFinj.data.length):
6990 if k<plus.data.length:
6991 strainFinj.data.data[k]=((fp*plus.data.data[k]+fc*cross.data.data[k]))
6993 strainFinj.data.data[k]=0.0
6994 twopit=2.*np.pi*(REAL8time-float(strainFinj.epoch))
6995 for k
in arange(strainFinj.data.length):
6996 re = cos(twopit*deltaF*k)
6997 im = -sin(twopit*deltaF*k)
6998 strainFinj.data.data[k]*= (re + 1j*im);
7000 inj_strains[ifo][
"F"][
'strain']=np.array([strainFinj.data.data[k]
for k
in arange(
int(strainFinj.data.length))])
7001 inj_strains[ifo][
"F"][
'x']=np.array([strainFinj.f0+ k*strainFinj.deltaF
for k
in arange(
int(strainFinj.data.length))])
7004 if f0
is not None and f0
is not np.nan:
7005 if q
is not None and q
is not np.nan:
7007 if f0-6.*sigmaF>plot_fmin:
7008 plot_fmin=f0-6.*sigmaF
7009 if f0+6.*sigmaF<plot_fmax:
7010 plot_fmax=f0+6.*sigmaF
7012 if REAL8time-6.*sigmaT<plot_tmin:
7013 plot_tmin=REAL8time-6.*sigmaT
7014 if REAL8time+6.*sigmaT>plot_tmax:
7015 plot_tmax=REAL8time+6.*sigmaT
7017 if dur
is not None and dur
is not np.nan:
7018 sigmaF=1./sqrt(2.)/pi/dur
7019 if 0+6.*sigmaF<plot_fmax:
7020 plot_fmax=0+6.*sigmaF
7023 if REAL8time-6.*sigmaT<plot_tmin:
7024 plot_tmin=REAL8time-6.*sigmaT
7025 if REAL8time+6.*sigmaT>plot_tmax:
7026 plot_tmax=REAL8time+6.*sigmaT
7032 _,which=pos._posMap()
7034 if 'time' in pos.names:
7035 REAL8time=pos[
'time'].samples[which][0]
7036 elif 'time_maxl' in pos.names:
7037 REAL8time=pos[
'time_maxl'].samples[which][0]
7038 elif 'time_mean' in pos.names:
7039 REAL8time=pos[
'time_mean'].samples[which][0]
7040 elif 'time_min' in pos.names
and 'time_max' in pos.names:
7041 REAL8time=pos[
'time_min'].samples[which][0]+0.5*(pos[
'time_max'].samples[which][0]-pos[
'time_min'].samples[which][0])
7043 print(
"ERROR: could not find any time parameter in the posterior file. Not plotting the WF...\n")
7050 approximant=
int(pos[
'LAL_APPROXIMANT'].samples[which][0])
7054 GPStime=LIGOTimeGPS(REAL8time)
7055 if GlobREAL8time
is None:
7056 GlobREAL8time=REAL8time
7057 strainTrec.epoch=LIGOTimeGPS(round(GlobREAL8time,0)-seglen/2.)
7058 strainFrec.epoch=LIGOTimeGPS(round(GlobREAL8time,0)-seglen/2.)
7059 if "duration" in pos.names:
7060 dur=pos[
"duration"].samples[which][0]
7063 if "quality" in pos.names:
7064 q=pos[
'quality'].samples[which][0]
7067 if 'frequency' in pos.names:
7068 f0=pos[
'frequency'].samples[which][0]
7072 hrss=pos[
'hrss'].samples[which][0]
7074 hrss=np.exp(pos[
'loghrss'].samples[which][0])
7075 if np.isnan(q)
and not np.isnan(dur):
7078 if 'alpha' in pos.names:
7079 alpha=pos[
'alpha'].samples[which][0]
7081 polar_e_ecc=pos[
'polar_eccentricity'].samples[which][0]
7082 elif 'polar_ellipse_angle' in pos.names:
7083 polar_e_angle=pos[
'polar_ellipse_angle'].samples[which][0]
7084 polar_e_ecc=pos[
'polar_eccentricity'].samples[which][0]
7086 BurstExtraParams=
None
7090 if SimBurstImplementedFDApproximants(approximant):
7092 [plus,cross]=SimBurstChooseFDWaveform(deltaF, deltaT, f0, q,dur, f_min, f_max,hrss,polar_e_angle ,polar_e_ecc,BurstExtraParams, approximant)
7093 elif SimBurstImplementedTDApproximants(approximant):
7095 [plus,cross]=SimBurstChooseTDWaveform(deltaT, f0, q,dur, f_min, f_max,hrss,polar_e_angle ,polar_e_ecc,BurstExtraParams, approximant)
7097 print(
"The approximant %s doesn't seem to be recognized by lalinference!\n Skipping WF plots\n"%approximant)
7099 ra=pos[
'ra'].samples[which][0]
7100 dec=pos[
'dec'].samples[which][0]
7101 psi=pos[
'psi'].samples[which][0]
7104 fp, fc = ComputeDetAMResponse(lal.cached_detector_by_prefix[ifo].response, ra, dec, psi, GreenwichMeanSiderealTime(REAL8time))
7107 tCinstrain=np.floor(REAL8time-float(strainTrec.epoch))/deltaT
7109 tSinstrain=
int( (REAL8time-fabs(float(plus.epoch)) - fabs(float(strainTrec.epoch)))/deltaT)
7112 rem=(REAL8time-fabs(float(plus.epoch)) - fabs(float(strainTrec.epoch)))/deltaT-tSinstrain
7116 for k
in np.arange(tSinstrain):
7117 strainTrec.data.data[k]=0.0
7119 for k
in np.arange(plus.data.length):
7120 strainTrec.data.data[k+tSinstrain]=((fp*plus.data.data[k]+fc*cross.data.data[k]))
7122 for k
in np.arange(strainTrec.data.length- (tSinstrain +plus.data.length)):
7123 strainTrec.data.data[k+tSinstrain+plus.data.length]=0.0
7124 for k
in np.arange(strainTrec.data.length):
7125 strainTrec.data.data[k]*=window.data.data[k]
7127 rec_strains[ifo][
"T"][
'strain']=np.array([strainTrec.data.data[j]
for j
in arange(strainTrec.data.length)])
7128 rec_strains[ifo][
"T"][
'x']=np.array([strainTrec.epoch + j*deltaT
for j
in arange(strainTrec.data.length)])
7130 for j
in arange(strainFrec.data.length):
7131 strainFrec.data.data[j]=0.0
7132 REAL8TimeFreqFFT(strainFrec,strainTrec,timeToFreqFFTPlan);
7133 twopit=2.*np.pi*(rem*deltaT)
7134 for k
in arange(strainFrec.data.length):
7135 re = cos(twopit*deltaF*k)
7136 im = -sin(twopit*deltaF*k)
7137 strainFrec.data.data[k]*= (re + 1j*im);
7139 rec_strains[ifo][
"F"][
'strain']=np.array([strainFrec.data.data[k]
for k
in arange(
int(strainFrec.data.length))])
7140 rec_strains[ifo][
"F"][
'x']=np.array([strainFrec.f0+ k*strainFrec.deltaF
for k
in arange(
int(strainFrec.data.length))])
7141 elif rec_domain==
'F':
7142 for k
in np.arange(strainFrec.data.length):
7143 if k<plus.data.length:
7144 strainFrec.data.data[k]=((fp*plus.data.data[k]+fc*cross.data.data[k]))
7146 strainFrec.data.data[k]=0.0
7147 twopit=2.*np.pi*(REAL8time-float(strainFrec.epoch))
7148 for k
in arange(strainFrec.data.length):
7149 re = cos(twopit*deltaF*k)
7150 im = -sin(twopit*deltaF*k)
7151 strainFrec.data.data[k]*= (re + 1j*im);
7153 rec_strains[ifo][
"F"][
'strain']=np.array([strainFrec.data.data[k]
for k
in arange(
int(strainFrec.data.length))])
7154 rec_strains[ifo][
"F"][
'x']=np.array([strainFrec.f0+ k*strainFrec.deltaF
for k
in arange(
int(strainFrec.data.length))])
7157 if f0
is not None and f0
is not np.nan:
7158 if q
is not None and q
is not np.nan:
7160 if f0-6.*sigmaF>plot_fmin:
7161 plot_fmin=f0-6.*sigmaF
7162 if f0+6.*sigmaF<plot_fmax:
7163 plot_fmax=f0+6.*sigmaF
7165 if REAL8time-6.*sigmaT<plot_tmin:
7166 plot_tmin=REAL8time-6.*sigmaT
7167 if REAL8time+6.*sigmaT>plot_tmax:
7168 plot_tmax=REAL8time+6.*sigmaT
7170 if dur
is not None and dur
is not np.nan:
7171 sigmaF=1./sqrt(2.)/pi/dur
7172 if 0+6.*sigmaF<plot_fmax:
7173 plot_fmax=0+6.*sigmaF
7176 if REAL8time-6.*sigmaT<plot_tmin:
7177 plot_tmin=REAL8time-6.*sigmaT
7178 if REAL8time+6.*sigmaT>plot_tmax:
7179 plot_tmax=REAL8time+6.*sigmaT
7181 myfig=plt.figure(1,figsize=(10,7))
7189 if rec_domain
is not None and inj_domain
is not None:
7190 if rec_domain==
"T" and inj_domain==
"T":
7192 elif rec_domain
is not None:
7195 elif inj_domain
is not None:
7199 A,axes=plt.subplots(nrows=rows,ncols=cols,sharex=
False,sharey=
False)
7200 plt.setp(A,figwidth=10,figheight=7)
7201 for (r,i)
in zip(np.arange(rows),ifos):
7202 for c
in np.arange(cols):
7204 if type(ax)==np.ndarray:
7208 if rec_strains[i][
"T"][
'strain']
is not None or rec_strains[i][
"F"][
'strain']
is not None:
7210 if global_domain==
"T":
7211 ax.plot(rec_strains[i][
"T"][
'x'],rec_strains[i][
"T"][
'strain'],colors_rec[i],label=
'%s maP'%i,linewidth=5)
7212 ax.set_xlim([plot_tmin,plot_tmax])
7215 data=rec_strains[i][
"F"][
'strain']
7216 f=rec_strains[i][
"F"][
'x']
7217 mask=np.logical_and(f>=plot_fmin,f<=plot_fmax)
7219 ax.plot(f[mask],real(ys[mask]),
'-',color=colors_rec[i],label=
'%s maP'%i,linewidth=5)
7220 ax.set_xlim([plot_fmin,plot_fmax])
7222 data=rec_strains[i][
"F"][
'strain']
7223 f=rec_strains[i][
"F"][
'x']
7224 mask=np.logical_and(f>=plot_fmin,f<=plot_fmax)
7226 ax.loglog(f[mask],absolute(ys[mask]),
'--',color=colors_rec[i],linewidth=5)
7227 ax.grid(
True,which=
'both')
7228 ax.set_xlim([plot_fmin,plot_fmax])
7229 if inj_strains[i][
"T"][
'strain']
is not None or inj_strains[i][
"F"][
'strain']
is not None:
7231 if global_domain==
"T":
7232 ax.plot(inj_strains[i][
"T"][
'x'],inj_strains[i][
"T"][
'strain'],colors_inj[i],label=
'%s inj'%i,linewidth=2)
7233 ax.set_xlim([plot_tmin,plot_tmax])
7235 data=inj_strains[i][
"F"][
'strain']
7236 f=inj_strains[i][
"F"][
'x']
7237 mask=np.logical_and(f>=plot_fmin,f<=plot_fmax)
7239 ax.plot(f[mask],real(ys[mask]),
'-',color=colors_inj[i],label=
'%s inj'%i,linewidth=2)
7240 ax.set_xlim([plot_fmin,plot_fmax])
7242 data=inj_strains[i][
"F"][
'strain']
7243 f=inj_strains[i][
"F"][
'x']
7244 mask=np.logical_and(f>=plot_fmin,f<=plot_fmax)
7246 ax.loglog(f[mask],absolute(ys[mask]),
'--',color=colors_inj[i],linewidth=2)
7247 ax.grid(
True,which=
'both')
7248 ax.set_xlim([plot_fmin,plot_fmax])
7251 if global_domain==
"T":
7252 ax.set_title(
r"$h(t)$",fontsize=font_size)
7254 ax.set_title(
r"$\Re[h(f)]$",fontsize=font_size)
7256 ax.set_title(
r"$|h(f)|$",fontsize=font_size)
7259 if global_domain==
"T":
7260 ax.set_xlabel(
"time [s]",fontsize=font_size)
7262 ax.set_xlabel(
"frequency [Hz]",fontsize=font_size)
7264 ax.set_xlabel(
"frequency [Hz]",fontsize=font_size)
7266 ax.legend(loc=
'best')
7270 A.savefig(os.path.join(path,
'WF_DetFrame.png'),bbox_inches=
'tight')
7271 return inj_strains, rec_strains
7273def make_1d_table(html,legend,label,pos,pars,noacf,GreedyRes,onepdfdir,sampsdir,savepdfs,greedy,analyticLikelihood,nDownsample):
7275 from numpy
import unique, sort
7276 global confidenceLevels
7277 confidence_levels=confidenceLevels
7282 if set(pos.names)-set(pars)==set(pos.names):
7286 tabid=
'onedmargtable_'+label.lower()
7287 html_ompdf=html.add_collapse_section(
'1D marginal posterior PDFs (%s)'%label,legend=legend,innertable_id=tabid)
7290 html_ompdf_write=
'<table id="%s"><tr><th>Histogram and Kernel Density Estimate</th><th>Samples used</th><th>Autocorrelation</th></tr>'%tabid
7292 html_ompdf_write=
'<table id="%s"><tr><th>Histogram and Kernel Density Estimate</th><th>Samples used</th></tr>'%tabid
7295 if 'chain' in pos.names:
7296 data,header=pos.samples()
7297 par_index=pos.names.index(
'cycle')
7298 chain_index=pos.names.index(
"chain")
7299 chains=unique(pos[
"chain"].samples)
7300 chainCycles = [sort(data[ data[:,chain_index] == chain, par_index ])
for chain
in chains]
7303 for cycles
in chainCycles:
7305 chainNcycles.append(cycles[-1] - cycles[0])
7306 chainNskips.append(cycles[1] - cycles[0])
7308 chainNcycles.append(1)
7309 chainNskips.append(1)
7310 elif 'cycle' in pos.names:
7311 cycles = sort(pos[
'cycle'].samples)
7313 Ncycles = cycles[-1]-cycles[0]
7314 Nskip = cycles[1]-cycles[0]
7320 for par_name
in pars:
7321 par_name=par_name.lower()
7323 pos[par_name.lower()]
7328 par_bin=GreedyRes[par_name]
7330 print(
"Bin size is not set for %s, skipping binning."%par_name)
7334 binParams={par_name:par_bin}
7339 print(
"Using greedy 1-d binning credible regions\n")
7341 toppoints,injectionconfidence,reses,injection_area,cl_intervals=
greedy_bin_one_param(pos,binParams,confidence_levels)
7344 print(
"Using 2-step KDE 1-d credible regions\n")
7346 if pos[par_name].injval
is None:
7349 injCoords=[pos[par_name].injval]
7350 _,reses,injstats=
kdtree_bin2Step(pos,[par_name],confidence_levels,injCoords=injCoords)
7351 if injstats
is not None:
7352 injectionconfidence=injstats[3]
7353 injection_area=injstats[4]
7355 print(
"Generating 1D plot for %s."%par_name)
7359 if analyticLikelihood:
7360 pdf = analyticLikelihood.pdf(par_name)
7361 cdf = analyticLikelihood.cdf(par_name)
7363 oneDPDFParams={par_name:50}
7366 except Exception
as exc:
7368 f
"failed to produce plot for {par_name}: "
7369 f
"[{type(exc).__name__}] {exc}",
7373 figname=par_name+
'.png'
7374 oneDplotPath=os.path.join(onepdfdir,figname)
7375 plotFig.savefig(oneDplotPath)
7376 if(savepdfs): plotFig.savefig(os.path.join(onepdfdir,par_name+
'.pdf'))
7380 print(
"r of injected value of %s (bins) = %f"%(par_name, rbins))
7383 myfig=plt.figure(figsize=(4,3.5),dpi=200)
7384 pos_samps=pos[par_name].samples
7385 if not (
"chain" in pos.names):
7388 plt.plot(pos_samps,
'k.', markersize=5, alpha=0.5, linewidth=0.0, figure=myfig)
7389 maxLen=len(pos_samps)
7394 data,header=pos.samples()
7395 par_index=pos.names.index(par_name)
7396 chain_index=pos.names.index(
"chain")
7397 chains=unique(pos[
"chain"].samples)
7398 chainData=[data[ data[:,chain_index] == chain, par_index ]
for chain
in chains]
7399 chainDataRanges=[range(len(cd))
for cd
in chainData]
7400 maxLen=
max([len(cd)
for cd
in chainData])
7401 for rng, data
in zip(chainDataRanges, chainData):
7402 plt.plot(rng, data, marker=
'.', markersize=1, alpha=0.5, linewidth=0.0,figure=myfig)
7403 plt.title(
"Gelman-Rubin R = %g"%(pos.gelman_rubin(par_name)))
7411 injpar=pos[par_name].injval
7413 if injpar
is not None:
7415 minrange=min(pos_samps)-0.05*(
max(pos_samps)-min(pos_samps))
7416 maxrange=
max(pos_samps)+0.05*(
max(pos_samps)-min(pos_samps))
7417 if minrange<injpar
and maxrange>injpar:
7418 plt.axhline(injpar, color=
'r', linestyle=
'-.',linewidth=4)
7419 myfig.savefig(os.path.join(sampsdir,figname.replace(
'.png',
'_samps.png')))
7420 if(savepdfs): myfig.savefig(os.path.join(sampsdir,figname.replace(
'.png',
'_samps.pdf')))
7424 acffig=plt.figure(figsize=(4,3.5),dpi=200)
7425 if not (
"chain" in pos.names):
7429 lines=plt.plot(acf,
'k.', marker=
'.', markersize=1, alpha=0.5, linewidth=0.0, figure=acffig)
7431 if nDownsample
is None:
7432 plt.title(
'Autocorrelation Function')
7433 elif 'cycle' in pos.names:
7434 last_color = lines[-1].get_color()
7435 plt.axvline(acl/Nskip, linestyle=
'-.', color=last_color)
7436 plt.title(
'ACL = %i N = %i'%(acl,Neff))
7437 acffig.savefig(os.path.join(sampsdir,figname.replace(
'.png',
'_acf.png')))
7438 if(savepdfs): acffig.savefig(os.path.join(sampsdir,figname.replace(
'.png',
'_acf.pdf')))
7448 for rng, data, Nskip, Ncycles
in zip(chainDataRanges, chainData, chainNskips, chainNcycles):
7452 lines=plt.plot(acf,
'k.', marker=
'.', markersize=1, alpha=0.5, linewidth=0.0, figure=acffig)
7454 if nDownsample
is not None:
7455 last_color = lines[-1].get_color()
7456 plt.axvline(acl/Nskip, linestyle=
'-.', color=last_color)
7457 if nDownsample
is None:
7458 plt.title(
'Autocorrelation Function')
7460 plt.title(
'ACL = %i N = %i'%(
max(acls),Nsamps))
7461 acffig.savefig(os.path.join(sampsdir,figname.replace(
'.png',
'_acf.png')))
7462 if(savepdfs): acffig.savefig(os.path.join(sampsdir,figname.replace(
'.png',
'_acf.pdf')))
7471 acfhtml=
'<td width="30%"><img width="100%" src="1Dsamps/'+figname.replace(
'.png',
'_acf.png')+
'"/></td>'
7473 acfhtml=
'<td>ACF generation failed!</td>'
7474 html_ompdf_write+=
'<tr><td width="30%"><img width="100%" src="1Dpdf/'+figname+
'"/></td><td width="30%"><img width="100%" src="1Dsamps/'+figname.replace(
'.png',
'_samps.png')+
'"/></td>'+acfhtml+
'</tr>'
7476 html_ompdf_write+=
'<tr><td width="30%"><img width="100%" src="1Dpdf/'+figname+
'"/></td><td width="30%"><img width="100%" src="1Dsamps/'+figname.replace(
'.png',
'_samps.png')+
'"/></td></tr>'
7478 html_ompdf_write+=
'</table>'
7479 html_ompdf.write(html_ompdf_write)
def __init__(self, *args)
Return analytic likelihood values.
def names(self)
Return list of parameter names described by analytic likelihood function.
def __init__(self, covariance_matrix_files, mean_vector_files)
Prepare analytic likelihood for the given parameters.
def cdf(self, param)
Return PDF function for parameter.
def pdf(self, param)
Return PDF function for parameter.
Data structure for a table of posterior samples .
def __init__(self, commonResultsFormatData, SimBurstTableEntry=None, injFref=None, SnglBurstList=None, name=None, description=None)
Constructor.
def _inj_longitude(self, inj)
Dec tick locations with some intelligence.
def __init__(self, min=-pi_constant/2.0, max=pi_constant/2.0)
object to store the structure of a kd tree
def setSplit(self, dimension, value)
def setImportance(self, sampleNumber, volume)
def __init__(self, bounding_box, left_child=None, right_child=None)
def search(self, coordinates)
takes a set of coordinates and searches down through the tree until it gets to a box with less than ...
def integrate(self, f, boxing=64)
Returns the integral of f(objects) over the tree.
def objects(self)
Returns the objects in the tree.
def __init__(self, objects)
Construct a kD-tree from a sequence of objects.
def bounds(self)
Returns the coordinates of the lower-left and upper-right corners of the bounding box for this tree: ...
def volume(self)
Returns the volume of the bounding box of the tree.
def _bounds_of_objects(self)
def _longest_dimension(self)
def right(self)
Returns the right tree.
def __iter__(self)
Iterator over all the objects contained in the tree.
def left(self)
Returns the left tree.
def operate(self, f, g, boxing=64)
Operates on tree nodes exceeding boxing parameter depth.
def _same_coords(self, objects)
def split_dim(self)
Returns the dimension along which this level of the kD-tree splits.
A kD-tree suitable for splitting parameter spaces and counting hypervolumes.
def integrate(self, f, boxing=64)
Returns the integral of f(objects) over the tree.
def fillNewTree(self, boxing=64, isArea=False)
copies tree structure, but with KDSkeleton as the new nodes.
def __iter__(self)
Iterator over all the objects contained in the tree.
def left(self)
Returns the left tree.
def objects(self)
Returns the objects in the tree.
def split_dim(self)
Returns the dimension along which this level of the kD-tree splits.
def right(self)
Returns the right tree.
def volume(self)
Returns the volume of the bounding box of the tree.
def _same_coords(self, objects)
def bounds(self)
Returns the coordinates of the lower-left and upper-right corners of the bounding box for this tree: ...
def search(self, coordinates, boxing=64)
takes a set of coordinates and searches down through the tree until it gets to a box with less than ...
def __init__(self, objects, boundingbox, dims=0)
Construct a kD-tree from a sequence of objects.
def operate(self, f, g, boxing=64)
Operates on tree nodes exceeding boxing parameter depth.
A parser for the output of Bayesian parameter estimation codes.
def _clear_infmcmc_header(self, infile)
def _find_ndownsample(self, files, logPthreshold, fixedBurnins, nDownsample)
def _common_to_pos(self, infile, info=[None, None])
def __init__(self, inputtype)
def _hdf5_to_pos(self, infile, fixedBurnins=None, deltaLogP=None, nDownsample=None, tablename=None, **kwargs)
def _xml_to_pos(self, infile)
def _hdf5_to_table(self, infile, deltaLogP=None, fixedBurnin=None, nDownsample=None, multiple_chains=False, tablename=None, **kwargs)
def _find_infmcmc_f_lower(self, runInfo)
def _infmcmc_to_pos(self, files, outdir=None, deltaLogP=None, fixedBurnins=None, nDownsample=None, oldMassConvention=False, **kwargs)
def _infmcmc_output_posterior_samples(self, files, runfile, outfile, logPThreshold, fixedBurnins, nskips=None, oldMassConvention=False)
def parse(self, files, **kwargs)
Parse files.
def _swaplabel12(self, label)
def _VOTTABLE2pos(self, table)
def _hdf5s_to_pos(self, infiles, fixedBurnins=None, deltaLogP=None, nDownsample=None, tablename=None, **kwargs)
def _find_infmcmc_f_ref(self, runInfo)
def _ns_to_pos(self, files, Nlive=None, Npost=None, posfilename='posterior_samples.dat')
def _followupmcmc_to_pos(self, files)
Data structure for a table of posterior samples .
def stdevs(self)
Return dict {paramName:paramStandardDeviation} .
def maxP(self)
Return the maximum a posteriori probability and the corresponding set of parameters.
def set_triggers(self, triggers)
Set the trigger values of the parameters.
def __init__(self, commonResultsFormatData, SimInspiralTableEntry=None, inj_spin_frame='OrbitalL', injFref=100, SnglInspiralList=None, name=None, description=None)
Constructor.
def bySample(self)
Generate a forward iterator over the list of samples corresponding to the data stored within the Post...
def __len__(self)
Container method.
def __iter__(self)
Container method.
def extend_posterior(self)
Add some useful derived parameters (such as tilt angles, time delays, etc) in the Posterior object.
def delete_NaN_entries(self, param_list)
Remove samples containing NaN in request params.
def name(self)
Return qualified string containing the 'name' of the Posterior instance.
def gelman_rubin(self, pname)
Returns an approximation to the Gelman-Rubin statistic (see Gelman, A.
def di_evidence(self, boxing=64)
Returns the log of the direct-integration evidence for the posterior samples.
def __getitem__(self, key)
Container method .
def _average_posterior(self, samples, post_name)
def _inj_spins(self, inj, frame='OrbitalL')
def forward(self)
Generate a forward iterator (in sense of list of names) over Posterior with name,one_d_pos.
def triggers(self)
Return the trigger values .
def append(self, one_d_posterior)
Container method.
def _gettrigpar(self, paramname)
def description(self)
Return qualified string containing a 'description' of the Posterior instance.
def bootstrap(self)
Returns a new Posterior object that contains a bootstrap sample of self.
def means(self)
Return dict {paramName:paramMean} .
def _average_posterior_like_prior(self, samples, logl_name, prior_name, log_bias=0)
def delete_samples_by_idx(self, samples)
Remove samples from all OneDPosteriors.
def maxL(self)
Return the maximum likelihood probability and the corresponding set of parameters.
def injection(self)
Return the injected values.
def _getinjpar(self, paramname)
def healpix_map(self, resol, nest=True)
Returns a healpix map in the pixel ordering that represents the posterior density (per square degree)...
def _print_table_row(self, name, entries)
def _total_incl_restarts(self, samples)
def __str__(self)
Define a string representation of the Posterior class ; returns a html formatted table of various pro...
def DIC(self)
Returns the Deviance Information Criterion estimated from the posterior samples.
def write_to_file(self, fname)
Dump the posterior table to a file in the 'common format'.
def names(self)
Return list of parameter names.
def samples(self)
Return an (M,N) numpy.array of posterior samples; M = len(self); N = dim(self) .
def pop(self, param_name)
Container method.
def append_mapping(self, new_param_names, func, post_names)
Append posteriors pos1,pos2,...=func(post_names)
def elliptical_subregion_evidence(self)
Returns an approximation to the log(evidence) obtained by fitting an ellipse around the highest-poste...
def _inj_longitude(self, inj)
def medians(self)
Return dict {paramName:paramMedian} .
def longest_chain_cycles(self)
Returns the number of cycles in the longest chain.
def dim(self)
Return number of parameters.
def set_injection(self, injection)
Set the injected values of the parameters.
def harmonic_mean_evidence(self)
Returns the log of the harmonic mean evidence for the set of posterior samples.
A data structure representing one parameter in a chain of posterior samples.
def name(self)
Return the string literal name of the parameter.
def median(self)
Return the median value for the marginal PDF on the parameter.
def delete_samples_by_idx(self, samples)
Remove samples from posterior, analogous to numpy.delete but operates in place.
def injval(self)
Return the injected value set at construction .
def set_trigvals(self, new_trigvals)
Set the trigger values of the parameter.
def __len__(self)
Container method.
def __init__(self, name, posterior_samples, injected_value=None, injFref=None, trigger_values=None, prior=None)
Create an instance of PosteriorOneDPDF based on a table of posterior_samples.
def stacc(self)
Return the 'standard accuracy statistic' (stacc) of the marginal posterior of the parameter.
def set_injval(self, new_injval)
Set the injected/real value of the parameter.
def trigvals(self)
Return the trigger values set at construction.
def mean(self)
Return the arithmetic mean for the marginal PDF on the parameter.
def __getitem__(self, idx)
Container method .
def prob_interval(self, intervals)
Evaluate probability intervals.
def KL(self)
Returns the KL divergence between the prior and the posterior.
def gaussian_kde(self)
Return a SciPy gaussian_kde (representing a Gaussian KDE) of the samples.
def stdev(self)
Return the standard deviation of the marginal PDF on the parameter.
def samples(self)
Return a 1D numpy.array of the samples.
A single parameter sample object, suitable for inclusion in a kD-tree.
def __getitem__(self, key)
Return the element with the corresponding name.
def coord(self)
Return the coordinates for the parameter sample.
def __init__(self, sample_array, headers, coord_names)
Given the sample array, headers for the values, and the names of the desired coordinates,...
RA tick locations with some intelligence.
def __init__(self, min=0.0, max=2.0 *pi_constant)
def start(self, tag, attrib)
A base class for representing web content using ElementTree .
def tab(self, idtable=None)
def toprettyxml(self)
Return a pretty-printed XML string of the htmlPage.
def insert_row(self, tab, label=None)
Insert row in table tab.
def append(self, element)
def insert_td(self, row, td, label=None, legend=None)
Insert cell td into row row.
def __init__(self, tag, attrib=None, parent=None)
def a(self, url, linktext)
Represents a block of html fitting within a htmlPage.
def __init__(self, section_name, htmlElement=None, table_id=None, start_closed=True)
A concrete class for generating an XHTML(1) document.
def add_section(self, section_name, legend=None)
def add_collapse_section(self, section_name, legend=None, innertable_id=None, start_closed=True)
Create a section embedded into a table that can be collapsed with a button.
def add_section_to_element(self, section_name, parent)
Create a section which is not appended to the body of html, but to the parent Element.
def __init__(self, title=None, css=None, javascript=None, toc=False)
Represents a block of html fitting within a htmlPage.
def __init__(self, section_name, htmlElement=None, blank=False)
def plot_two_param_greedy_bins_hist(posterior, greedy2Params, confidence_levels)
Histograms of the ranked pixels produced by the 2-parameter greedy binning algorithm coloured by their...
def cred_interval(x, cl=.9, lower=True)
Return location of lower or upper confidence levels Args: x: List of samples.
def histogram2D(posterior, greedy2Params, confidence_levels)
Returns a 2D histogram and edges for the two parameters passed in greedy2Params, plus the actual disc...
def plot_spline_pos(logf, ys, nf=100, level=0.9, color='k', label=None, xform=None)
Plot calibration posterior estimates for a spline model in log space.
def lambda_a(redshift, nonGR_alpha, lambda_eff, distance)
Converting from the effective wavelength-like parameter to lambda_A: lambda_A = lambda_{eff}*(D_alpha...
def spline_angle_xform(delta_psi)
Returns the angle in degrees corresponding to the spline calibration parameters delta_psi.
def q2eta(q)
Utility function for converting q to eta.
def plot_one_param_pdf(posterior, plot1DParams, analyticPDF=None, analyticCDF=None, plotkde=False)
Plots a 1D histogram and (gaussian) kernel density estimate of the distribution of posterior samples ...
def det_end_time(ifo_prefix, inj)
def autocorrelation(series)
Returns an estimate of the autocorrelation function of a given series.
def array_ang_sep(vec1, vec2)
Find angles between vectors in rows of numpy arrays.
def parse_converge_output_section(fo)
def physical2radiationFrame(theta_jn, phi_jl, tilt1, tilt2, phi12, a1, a2, m1, m2, fref, phiref)
changes for testing Lorentz violations made till here
def plot_sky_map(hpmap, outdir, inj=None, nest=True)
Plots a sky map from a healpix map, optionally including an injected position.
def mc2q(mc, eta)
Utility function for converting mchirp,eta to new mass ratio q (m2/m1).
def kdtree_bin2Step(posterior, coord_names, confidence_levels, initial_boundingbox=None, samples_per_bin=10, injCoords=None, alternate=False, fraction=0.5, skyCoords=False)
input: posterior class instance, list of confidence levels, optional choice of inital parameter space...
def plot_calibration_pos(pos, level=.9, outpath=None)
def orbital_momentum_mag(fref, m1, m2, eta)
def plot_burst_waveform(pos=None, simburst=None, event=0, path=None, ifos=['H1', 'L1', 'V1'])
def plot_two_param_kde(posterior, plot2DkdeParams)
Plots a 2D kernel density estimate of the 2-parameter marginal posterior.
def make_1d_table(html, legend, label, pos, pars, noacf, GreedyRes, onepdfdir, sampsdir, savepdfs, greedy, analyticLikelihood, nDownsample)
def plot_label(param)
A lookup table for plot labels.
def effectiveSampleSize(samples, Nskip=1)
Compute the effective sample size, calculating the ACL using only the second half of the samples to a...
def random_split(items, fraction)
def plot_waveform(pos=None, siminspiral=None, event=0, path=None, ifos=['H1', 'L1', 'V1'])
def getDecString(radians, accuracy='auto')
def plot_psd(psd_files, outpath=None, f_min=30.)
def autocorrelation_length_estimate(series, acf=None, M=5, K=2)
Attempts to find a self-consistent estimate of the autocorrelation length of a given series.
def contigious_interval_one_param(posterior, contInt1Params, confidence_levels)
Calculates the smallest contigious 1-parameter confidence interval for a set of given confidence leve...
def as_array(table)
Workaround for missing astropy.table.Table.as_array method, which was added in Astropy 1....
def addSample(tree, coordinates)
def cart2sph(x, y, z)
Utility function to convert cartesian coords to r,theta,phi.
def vo_nest2pos(nsresource, Nlive=None)
Parse a VO Table RESOURCE containing nested sampling output and return a VOTable TABLE element with p...
def find_ndownsample(samples, nDownsample)
Given a list of files, threshold value, and a desired number of outputs posterior samples,...
def skymap_inj_pvalue(hpmap, inj, nest=True)
Returns the greedy p-value estimate for the given injection.
def symm_tidal_params(lambda1, lambda2, q)
Calculate best tidal parameters [Eqs.
def kdtree_bin_sky_area(posterior, confidence_levels, samples_per_bin=10)
takes samples and applies a KDTree to them to return confidence levels returns confidence_intervals -...
def getRAString(radians, accuracy='auto')
def kdtree_bin_sky_volume(posterior, confidence_levels)
def plot_one_param_pdf_kde(fig, onedpos)
def q2ms(mc, q)
Utility function for converting mchirp,q to component masses.
def skyArea(bounds)
functions used in 2stage kdtree
def amplitudeMeasure(redshift, nonGR_alpha, lambda_eff, distance)
Converting to Lorentz violating parameter "A" in dispersion relation from lambda_A: A = (lambda_A/h)^...
def plot_two_param_greedy_bins_contourf(posteriors_by_name, greedy2Params, confidence_levels, colors_by_name, figsize=(7, 6), dpi=120, figposition=[0.3, 0.3, 0.5, 0.5], legend='right', hatches_by_name=None)
def confidence_interval_uncertainty(cl, cl_bounds, posteriors)
Returns a tuple (relative_change, fractional_uncertainty, percentile_uncertainty) giving the uncertai...
def ang_dist(long1, lat1, long2, lat2)
Find the angular separation of (long1,lat1) and (long2,lat2), which are specified in radians.
def pol2cart(long, lat)
Utility function to convert longitude,latitude on a unit sphere to cartesian co-ordinates.
def kdtree_bin(posterior, coord_names, confidence_levels, initial_boundingbox=None, samples_per_bin=10)
takes samples and applies a KDTree to them to return confidence levels returns confidence_intervals -...
def plot_corner(posterior, levels, parnames=None)
Make a corner plot using the triangle module (See http://github.com/dfm/corner.py)
def sph2cart(r, theta, phi)
Utility function to convert r,theta,phi to cartesian co-ordinates.
def get_inj_by_time(injections, time)
Filter injections to find the injection with end time given by time +/- 0.1s.
def array_dot(vec1, vec2)
Calculate dot products between vectors in rows of numpy arrays.
def rotation_matrix(angle, direction)
Compute general rotation matrices for a given angles and direction vectors.
def greedy_bin_two_param(posterior, greedy2Params, confidence_levels)
Determine the 2-parameter Bayesian Confidence Intervals using a greedy binning algorithm.
def chi_precessing(m1, a1, tilt1, m2, a2, tilt2)
Calculate the magnitude of the effective precessing spin following convention from Phys.
def skymap_confidence_areas(hpmap, cls)
Returns the area (in square degrees) for each confidence level with a greedy binning algorithm for th...
def readCoincXML(xml_file, trignum)
def calculate_redshift(distance, h=0.6790, om=0.3065, ol=0.6935, w0=-1.0)
Calculate the redshift from the luminosity distance measurement using the Cosmology Calculator provid...
def replace_column(table, old, new)
Workaround for missing astropy.table.Table.replace_column method, which was added in Astropy 1....
def mc2ms(mc, eta)
Utility function for converting mchirp,eta to component masses.
def array_polar_ang(vec)
Find polar angles of vectors in rows of a numpy array.
def component_momentum(m, a, theta, phi)
Calculate BH angular momentum vector.
def ROTATEY(angle, vx, vy, vz)
def DistanceMeasure(redshift, nonGR_alpha)
D_alpha = ((1+z)^(1-alpha))/H_0 * D_alpha # from eq.15 of arxiv 1110.2720 D_alpha calculated from int...
def integrand_distance(redshift, nonGR_alpha)
Following functions added for testing Lorentz violations.
def plot_two_param_kde_greedy_levels(posteriors_by_name, plot2DkdeParams, levels, colors_by_name, line_styles=__default_line_styles, figsize=(4, 3), dpi=250, figposition=[0.2, 0.2, 0.48, 0.75], legend='right', hatches_by_name=None, Npixels=50)
Plots a 2D kernel density estimate of the 2-parameter marginal posterior.
def orbital_momentum(fref, m1, m2, inclination)
Calculate orbital angular momentum vector.
def ROTATEZ(angle, vx, vy, vz)
def source_mass(mass, redshift)
Calculate source mass parameter for mass m as: m_source = m / (1.0 + z) For a parameter m.
def greedy_bin_one_param(posterior, greedy1Param, confidence_levels)
Determine the 1-parameter Bayesian Confidence Interval using a greedy binning algorithm.
def spin_angles(fref, mc, eta, incl, a1, theta1, phi1, a2=None, theta2=None, phi2=None)
Calculate physical spin angles.
def bbh_average_fits_precessing(m1, m2, chi1, chi2, tilt1, tilt2, phi12, quantity, fits)
Convenience functions.
def read_samples(filename, path=None, tablename=POSTERIOR_SAMPLES)
Read an HDF5 sample chain file.
def draw_posterior_many(datas, Nlives, verbose=False)
Draw samples from the posteriors represented by the (Nruns, Nsamples, Nparams)-shaped array datas,...
def draw_N_posterior_many(datas, Nlives, Npost, verbose=False)
Draw Npost samples from the posteriors represented by the (Nruns, Nsamples, Nparams)-shaped array dat...