# LALInference 4.1.10.1-bf6a62b -- bayespputils.py (doxygen listing header)
1# -*- coding: utf-8 -*-
2#
3# bayespputils.py
4#
5# Copyright 2010
6# Benjamin Aylott <benjamin.aylott@ligo.org>,
7# Benjamin Farr <bfarr@u.northwestern.edu>,
8# Will M. Farr <will.farr@ligo.org>,
9# John Veitch <john.veitch@ligo.org>,
10# Salvatore Vitale <salvatore.vitale@ligo.org>,
11# Vivien Raymond <vivien.raymond@ligo.org>
12#
13# This program is free software; you can redistribute it and/or modify
14# it under the terms of the GNU General Public License as published by
15# the Free Software Foundation; either version 2 of the License, or
16# (at your option) any later version.
17#
18# This program is distributed in the hope that it will be useful,
19# but WITHOUT ANY WARRANTY; without even the implied warranty of
20# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
21# GNU General Public License for more details.
22#
23# You should have received a copy of the GNU General Public License
24# along with this program; if not, write to the Free Software
25# Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston,
26# MA 02110-1301, USA.
27
28#===============================================================================
29# Preamble
30#===============================================================================
31
32"""
33This module contains classes and functions for post-processing the output
34of the Bayesian parameter estimation codes.
35"""
36
37#standard library imports
38import os
39import sys
40import warnings
41from math import cos,ceil,floor,sqrt,pi as pi_constant
42from xml.dom import minidom
43from operator import itemgetter
44
45#related third party imports
46import matplotlib
47matplotlib.use('agg')
48from .io import read_samples
49import healpy as hp
50import astropy.table
import numpy as np
# NOTE(review): this seeds NumPy's *global* RNG at import time, making all
# np.random use in any importing process deterministic as a side effect.
# Presumably intended for reproducible plots -- confirm before removing.
np.random.seed(42)
from numpy import fmod
54from matplotlib import pyplot as plt,lines as mpl_lines
55from scipy import stats
56from scipy import special
57from scipy import signal
58from scipy.optimize import newton
59from scipy import interpolate
60from scipy import integrate
61import random
62import socket
63from itertools import combinations
64from lalinference import LALInferenceHDF5PosteriorSamplesDatasetName as posterior_grp_name
65import re
66
67try:
68 import lalsimulation as lalsim
69except ImportError:
70 print('Cannot import lalsimulation SWIG bindings')
71 raise
72
73try:
74 from .imrtgr.nrutils import bbh_average_fits_precessing
75except ImportError:
76 print('Cannot import lalinference.imrtgr.nrutils. Will suppress final parameter and peak luminosity calculations.')
77
78from matplotlib.ticker import ScalarFormatter
79
# Determine the cluster domain (e.g. 'ligo.caltech.edu') for host-specific
# matplotlib workarounds.
try:
    # Keep everything after the first dot of the fully-qualified host name.
    hostname_short=socket.gethostbyaddr(socket.gethostname())[0].split('.',1)[1]
except Exception:
    # BUGFIX: was a bare 'except:', which would also swallow KeyboardInterrupt
    # and SystemExit. The lookup can legitimately fail (no reverse DNS entry,
    # undotted host name); fall back to a sentinel value.
    hostname_short='Unknown'
if hostname_short=='ligo.caltech.edu' or hostname_short=='cluster.ldas.cit': #The CIT cluster has troubles with the default 'cm' font. 'custom' has the least troubles, but does not include \odot
    matplotlib.rcParams.update(
        {'mathtext.fontset' : "custom",
        'mathtext.fallback' : 'cm'
        })
89
90from xml.etree.cElementTree import Element, SubElement, tostring, XMLParser
91
92#local application/library specific imports
93import lal
94from lal import LIGOTimeGPS, TimeDelayFromEarthCenter
95from . import git_version
96
# Module metadata; version id and date are taken from the git_version module
# generated at build time.
__author__="Ben Aylott <benjamin.aylott@ligo.org>, Ben Farr <bfarr@u.northwestern.edu>, Will M. Farr <will.farr@ligo.org>, John Veitch <john.veitch@ligo.org>, Vivien Raymond <vivien.raymond@ligo.org>"
__version__= "git id %s"%git_version.id
__date__= git_version.date
100
def get_end(siminspiral):
    """Return the geocentric end time of a sim_inspiral row, in seconds,
    combining its integer-second and nanosecond fields."""
    seconds = siminspiral.geocent_end_time
    nanoseconds = siminspiral.geocent_end_time_ns
    return seconds + 1e-9 * nanoseconds
103
def replace_column(table, old, new):
    """Workaround for missing astropy.table.Table.replace_column method,
    which was added in Astropy 1.1.

    FIXME: remove this function when LALSuite depends on Astropy >= 1.1."""
    # Remember where the old column sat so the replacement keeps its slot.
    position = table.colnames.index(old)
    table.remove_column(old)
    replacement = astropy.table.Column(new, name=old)
    table.add_column(replacement, index=position)
112
def as_array(table):
    """Workaround for missing astropy.table.Table.as_array method,
    which was added in Astropy 1.0.

    FIXME: remove this function when LALSuite depends on Astropy >= 1.0.

    @param table: a table object exposing either as_array() or a _data attribute.
    """
    try:
        return table.as_array()
    except AttributeError:
        # Older Astropy: fall back on the private ndarray attribute.
        # BUGFIX: was a bare 'except', which also masked unrelated errors
        # raised *inside* a working as_array() implementation.
        return table._data
122
#===============================================================================
# Constants
#===============================================================================
#Parameters which are not to be exponentiated when found
logParams=['logl','loglh1','loglh2','logll1','loglv1','deltalogl','deltaloglh1','deltalogll1','deltaloglv1','logw','logprior','logpost','nulllogl','chain_log_evidence','chain_delta_log_evidence','chain_log_noise_evidence','chain_log_bayes_factor']
#Parameters known to cbcBPP
relativePhaseParams=[ a+b+'_relative_phase' for a,b in combinations(['h1','l1','v1'],2)]
snrParams=['snr','optimal_snr','matched_filter_snr','coherence'] + ['%s_optimal_snr'%(i) for i in ['h1','l1','v1']] + ['%s_cplx_snr_amp'%(i) for i in ['h1','l1','v1']] + ['%s_cplx_snr_arg'%(i) for i in ['h1', 'l1', 'v1']] + relativePhaseParams
calAmpParams=['calamp_%s'%(ifo) for ifo in ['h1','l1','v1']]
calPhaseParams=['calpha_%s'%(ifo) for ifo in ['h1','l1','v1']]
calParams = calAmpParams + calPhaseParams
# Masses
massParams=['m1','m2','chirpmass','mchirp','mc','eta','q','massratio','asym_massratio','mtotal','mf','mf_evol','mf_nonevol']
#Spins
spinParamsPrec=['a1','a2','phi1','theta1','phi2','theta2','costilt1','costilt2','costheta_jn','cosbeta','tilt1','tilt1_isco','tilt2','tilt2_isco','phi_jl','theta_jn','phi12','phi12_isco','af','af_evol','af_nonevol','afz','afz_evol','afz_nonevol']
spinParamsAli=['spin1','spin2','a1z','a2z']
spinParamsEff=['chi','effectivespin','chi_eff','chi_tot','chi_p']
spinParams=spinParamsPrec+spinParamsEff+spinParamsAli
# Source frame params
cosmoParam=['m1_source','m2_source','mtotal_source','mc_source','redshift','mf_source','mf_source_evol','mf_source_nonevol','m1_source_maxldist','m2_source_maxldist','mtotal_source_maxldist','mc_source_maxldist','redshift_maxldist','mf_source_maxldist','mf_source_maxldist_evol','mf_source_maxldist_nonevol']
#Strong Field
ppEParams=['ppEalpha','ppElowera','ppEupperA','ppEbeta','ppElowerb','ppEupperB','alphaPPE','aPPE','betaPPE','bPPE']
tigerParams= ['dchi%i'%(i) for i in range(8)] + ['dchi%il'%(i) for i in [5,6] ] + ['dxi%d'%(i+1) for i in range(6)] + ['dalpha%i'%(i+1) for i in range(5)] + ['dbeta%i'%(i+1) for i in range(3)] + ['dsigma%i'%(i+1) for i in range(4)] + ['dipolecoeff'] + ['dchiminus%i'%(i) for i in [1,2]] + ['dchiMinus%i'%(i) for i in [1,2]] + ['db1','db2','db3','db4','dc1','dc2','dc4','dcl'] + ['damp21', 'damp33']
qnmtestParams=['domega220','dtau220','domega210','dtau210','domega330','dtau330','domega440','dtau440','domega550','dtau550']
bransDickeParams=['omegaBD','ScalarCharge1','ScalarCharge2']
massiveGravitonParams=['lambdaG']
lorentzInvarianceViolationParams=['log10lambda_a','lambda_a','log10lambda_eff','lambda_eff','log10livamp','liv_amp']
tidalParams=['lambda1','lambda2','lam_tilde','dlam_tilde','lambdat','dlambdat','lambdas','bluni']
fourPiecePolyParams=['logp1','gamma1','gamma2','gamma3']
spectralParams=['sdgamma0','sdgamma1','sdgamma2','sdgamma3']
energyParams=['e_rad', 'e_rad_evol', 'e_rad_nonevol', 'l_peak', 'l_peak_evol', 'l_peak_nonevol', 'e_rad_maxldist', 'e_rad_maxldist_evol', 'e_rad_maxldist_nonevol']
# BUGFIX: this list previously contained 'dquadmona' twice; the symmetric
# combination 'dquadmons' (which has its own plot label below) was evidently
# the intended fourth entry.
spininducedquadParams = ['dquadmon1', 'dquadmon2', 'dquadmons', 'dquadmona']
strongFieldParams = (
    ppEParams + tigerParams + bransDickeParams + massiveGravitonParams
    + tidalParams + fourPiecePolyParams + spectralParams + energyParams
    + lorentzInvarianceViolationParams + spininducedquadParams + qnmtestParams
)
160
#Extrinsic
distParams=['distance','distMPC','dist','distance_maxl']
incParams=['iota','inclination','cosiota']
polParams=['psi','polarisation','polarization']
skyParams=['ra','rightascension','declination','dec']
phaseParams=['phase', 'phi0','phase_maxl']
#Times
timeParams=['time','time_mean']
endTimeParams=['l1_end_time','h1_end_time','v1_end_time']
#others
statsParams=['logprior','logl','deltalogl','deltaloglh1','deltalogll1','deltaloglv1','deltaloglh2','deltaloglg1']
calibParams=['calpha_l1','calpha_h1','calpha_v1','calamp_l1','calamp_h1','calamp_v1']

## Greedy bin sizes for cbcBPP and confidence levels used for the greedy bin intervals
confidenceLevels=[0.67,0.9,0.95,0.99]

# BUGFIX: removed a duplicate 'costheta_jn' key (an earlier 0.02 entry was
# silently overridden by the 0.1 entry kept below -- behaviour unchanged).
# NOTE(review): the key 'thatas' looks like a typo (perhaps 'thetas'); kept
# as-is because downstream code may look it up under this spelling.
greedyBinSizes={'mc':0.025,'m1':0.1,'m2':0.1,'mass1':0.1,'mass2':0.1,'mtotal':0.1,'mc_source':0.025,'m1_source':0.1,'m2_source':0.1,'mtotal_source':0.1,'mc_source_maxldist':0.025,'m1_source_maxldist':0.1,'m2_source_maxldist':0.1,'mtotal_source_maxldist':0.1,'eta':0.001,'q':0.01,'asym_massratio':0.01,'iota':0.01,'cosiota':0.02,'time':1e-4,'time_mean':1e-4,'distance':1.0,'dist':1.0,'distance_maxl':1.0,'redshift':0.01,'redshift_maxldist':0.01,'mchirp':0.025,'chirpmass':0.025,'spin1':0.04,'spin2':0.04,'a1z':0.04,'a2z':0.04,'a1':0.02,'a2':0.02,'phi1':0.05,'phi2':0.05,'theta1':0.05,'theta2':0.05,'ra':0.05,'dec':0.05,'chi':0.05,'chi_eff':0.05,'chi_tot':0.05,'chi_p':0.05,'costilt1':0.02,'costilt2':0.02,'thatas':0.05,'beta':0.05,'omega':0.05,'cosbeta':0.02,'ppealpha':1.0,'ppebeta':1.0,'ppelowera':0.01,'ppelowerb':0.01,'ppeuppera':0.01,'ppeupperb':0.01,'polarisation':0.04,'rightascension':0.05,'declination':0.05,'massratio':0.001,'inclination':0.01,'phase':0.05,'tilt1':0.05,'tilt2':0.05,'phi_jl':0.05,'theta_jn':0.05,'phi12':0.05,'flow':1.0,'phase_maxl':0.05,'calamp_l1':0.01,'calamp_h1':0.01,'calamp_v1':0.01,'calpha_h1':0.01,'calpha_l1':0.01,'calpha_v1':0.01,'logdistance':0.1,'psi':0.1,'costheta_jn':0.1,'mf':0.1,'mf_evol':0.1,'mf_nonevol':0.1,'mf_source':0.1,'mf_source_evol':0.1,'mf_source_nonevol':0.1,'mf_source_maxldist':0.1,'mf_source_maxldist_evol':0.1,'mf_source_maxldist_nonevol':0.1,'af':0.02,'af_evol':0.02,'af_nonevol':0.02,'afz':0.02,'afz_evol':0.01,'afz_nonevol':0.01,'e_rad':0.1,'e_rad_evol':0.1,'e_rad_nonevol':0.1,'e_rad_maxldist':0.1,'e_rad_maxldist_evol':0.1,'e_rad_maxldist_nonevol':0.1,'l_peak':0.1,'l_peak_evol':0.1,'l_peak_nonevol':0.1}
# Derived entries: SNR-like quantities all share one bin width.
for s in snrParams:
    greedyBinSizes[s]=0.05
# Per-detector end times and inter-site delays inherit the 'time' bin width.
for derived_time in ['h1_end_time','l1_end_time','v1_end_time','h1l1_delay','l1v1_delay','h1v1_delay']:
    greedyBinSizes[derived_time]=greedyBinSizes['time']
for derived_phase in relativePhaseParams:
    greedyBinSizes[derived_phase]=0.05
for param in tigerParams + bransDickeParams + massiveGravitonParams + lorentzInvarianceViolationParams + qnmtestParams:
    greedyBinSizes[param]=0.01
for param in tidalParams:
    greedyBinSizes[param]=2.5
for param in fourPiecePolyParams:
    greedyBinSizes[param]=2.5
for param in spectralParams:
    greedyBinSizes[param]=2.5
for param in spininducedquadParams:
    greedyBinSizes[param]=2.5
# Bin sizes for the (log-)likelihood statistics.
for loglname in statsParams:
    greedyBinSizes[loglname]=0.1
197
def det_end_time(ifo_prefix, inj):
    """Return the end time of injection 'inj' as observed at the detector
    identified by 'ifo_prefix' (e.g. 'H1'): the geocentric end time plus the
    light travel-time delay from the geocentre to the detector site."""
    detector_location = lal.cached_detector_by_prefix[ifo_prefix].location
    gps_time = LIGOTimeGPS(inj.geocent_end_time, inj.geocent_end_time_ns)
    geocent_seconds = inj.geocent_end_time + 1e-9*inj.geocent_end_time_ns
    delay = TimeDelayFromEarthCenter(detector_location, inj.longitude, inj.latitude, gps_time)
    return geocent_seconds + delay
205
#Pre-defined ordered list of line styles for use in matplotlib contour plots.
__default_line_styles=['solid', 'dashed', 'dashdot', 'dotted']
#Pre-defined ordered list of matplotlib colours for use in plots.
__default_color_lst=['r','b','y','g','c','m']
#A default css string for use in html results pages.
__default_css_string="""
p,h1,h2,h3,h4,h5
{
font-family:"Trebuchet MS", Arial, Helvetica, sans-serif;
}

p
{
font-size:14px;
}

h1
{
font-size:20px;
}

h2
{
font-size:18px;
}

h3
{
font-size:16px;
}



table
{
font-family:"Trebuchet MS", Arial, Helvetica, sans-serif;
width:100%;
border-collapse:collapse;
}
td,th
{
font-size:12px;
border:1px solid #B5C1CF;
padding:3px 7px 2px 7px;
}
th
{
font-size:14px;
text-align:left;
padding-top:5px;
padding-bottom:4px;
background-color:#B3CEEF;
color:#ffffff;
}
#postable tr:hover
{
background: #DFF4FF;
}
#covtable tr:hover
{
background: #DFF4FF;
}
#statstable tr:hover
{
background: #DFF4FF;
}

img {
 max-width: 510px;
 max-height: 510px;
 width:100%;
 height:100%;
}

.ppsection
{
border-bottom-style:double;
}

"""
# BUGFIX above: the img rule read 'eight:100%' -- not a CSS property; it was
# silently ignored by browsers. Corrected to 'height:100%'.
#A default javascript string for collapsible tables in html results pages.
__default_javascript_string='''
//<![CDATA[
function toggle_visibility(tbid,lnkid)
{

  if(document.all){document.getElementById(tbid).style.display = document.getElementById(tbid).style.display == 'block' ? 'none' : 'block';}

  else{document.getElementById(tbid).style.display = document.getElementById(tbid).style.display == 'table' ? 'none' : 'table';}

  document.getElementById(lnkid).value = document.getElementById(lnkid).value == '[-] Collapse' ? '[+] Expand' : '[-] Collapse';

 }
 //]]>

'''
301
302
#===============================================================================
# Function to return the correct prior distribution for selected parameters
#===============================================================================
def get_prior(name):
    """
    Return a string describing the default prior distribution for parameter
    'name', or None if no (analytic) prior is known.

    The returned string is one of: 'uniform', a numpy function name such as
    'np.cos'/'np.sin', or an expression in 'x' such as 'x**2' (consumed by
    PosteriorOneDPDF.KL).

    @param name: the (lower-case) parameter name.
    """
    distributions={
        'm1':'uniform',
        'm2':'uniform',
        'mc':None,
        'eta':None,
        'q':None,
        'mtotal':'uniform',
        'm1_source':None,
        'm2_source':None,
        'mtotal_source':None,
        'mc_source':None,
        'redshift':None,
        'm1_source_maxldist':None,
        'm2_source_maxldist':None,
        'mtotal_source_maxldist':None,
        'mc_source_maxldist':None,
        'redshift_maxldist':None,
        'mf':None,
        'mf_evol':None,
        'mf_nonevol':None,
        'mf_source':None,
        'mf_source_evol':None,
        'mf_source_nonevol':None,
        'mf_source_maxldist':None,
        'mf_source_maxldist_evol':None,
        'mf_source_maxldist_nonevol':None,
        'af':None,
        'af_evol':None,
        'af_nonevol':None,
        'afz':None,
        'afz_evol':None,
        'afz_nonevol':None,
        'e_rad':None,
        'e_rad_evol':None,
        'e_rad_nonevol':None,
        'e_rad_maxldist':None,
        'e_rad_maxldist_evol':None,
        'e_rad_maxldist_nonevol':None,
        'l_peak':None,
        'l_peak_evol':None,
        'l_peak_nonevol':None,
        'spin1':'uniform',
        'spin2':'uniform',
        'a1':'uniform',
        'a2':'uniform',
        'a1z':'uniform',
        'a2z':'uniform',
        'theta1':'uniform',
        'theta2':'uniform',
        'phi1':'uniform',
        'phi2':'uniform',
        'chi_eff':None,
        'chi_tot':None,
        'chi_p':None,
        'tilt1':None,
        'tilt2':None,
        'tilt1_isco':None,
        'tilt2_isco':None,
        'costilt1':'uniform',
        'costilt2':'uniform',
        'iota':'np.cos',
        'cosiota':'uniform',
        'time':'uniform',
        'time_mean':'uniform',
        'dist':'x**2',
        'distance_maxl':'x**2',
        'ra':'uniform',
        'dec':'np.cos',
        'phase':'uniform',
        'psi':'uniform',
        'theta_jn':'np.sin',
        'costheta_jn':'uniform',
        'beta':None,
        'cosbeta':None,
        'phi_jl':None,
        'phi12':None,
        'phi12_isco':None,
        'logl':None,
        'h1_end_time':None,
        'l1_end_time':None,
        'v1_end_time':None,
        'h1l1_delay':None,
        'h1v1_delay':None,
        'l1v1_delay':None,
        'lambdat' :None,
        'dlambdat':None,
        'lambda1' : 'uniform',
        'lambda2': 'uniform',
        'lam_tilde' : None,
        'dlam_tilde': None,
        'lambdas':'uniform',
        'bluni':'uniform',
        'logp1':None,
        'gamma1':None,
        'gamma2':None,
        'gamma3':None,
        'sdgamma0': None,
        'sdgamma1': None,
        'sdgamma2': None,
        'sdgamma3': None,
        'calamp_h1' : 'uniform',
        'calamp_l1' : 'uniform',
        'calpha_h1' : 'uniform',
        'calpha_l1' : 'uniform',
        'polar_eccentricity':'uniform',
        'polar_angle':'uniform',
        'alpha':'uniform'
        }
    # BUGFIX: the original did 'distributions(name)' -- *calling* the dict --
    # which always raised TypeError, was swallowed by a bare except, and so
    # returned None for every parameter. Use a plain dict lookup instead;
    # unknown parameters still map to None.
    return distributions.get(name)
419
#===============================================================================
# Function used to generate plot labels.
#===============================================================================
def plot_label(param):
    """
    A lookup table for plot labels.

    Maps a parameter name to a LaTeX axis label. Alias spellings (e.g.
    'mass1', 'chirpmass', 'rightascension') are first folded onto their
    canonical key; unknown parameters fall back to the raw name.

    @param param: the parameter name to look up.
    """
    # Alias spellings that share a single canonical label.
    m1_names = ['mass1', 'm1']
    m2_names = ['mass2', 'm2']
    mc_names = ['mc','mchirp','chirpmass']
    eta_names = ['eta','massratio','sym_massratio']
    q_names = ['q','asym_massratio']
    iota_names = ['iota','incl','inclination']
    dist_names = ['dist','distance']
    ra_names = ['rightascension','ra']
    dec_names = ['declination','dec']
    phase_names = ['phi_orb', 'phi', 'phase', 'phi0']
    # NOTE: a local 'gr_test_names' list was built here in the original but
    # never referenced anywhere in this function; it has been removed.

    labels={
        'm1':r'$m_1\,(\mathrm{M}_\odot)$',
        'm2':r'$m_2\,(\mathrm{M}_\odot)$',
        'mc':r'$\mathcal{M}\,(\mathrm{M}_\odot)$',
        'eta':r'$\eta$',
        'q':r'$q$',
        'mtotal':r'$M_\mathrm{total}\,(\mathrm{M}_\odot)$',
        'm1_source':r'$m_{1}^\mathrm{source}\,(\mathrm{M}_\odot)$',
        'm2_source':r'$m_{2}^\mathrm{source}\,(\mathrm{M}_\odot)$',
        'mtotal_source':r'$M_\mathrm{total}^\mathrm{source}\,(\mathrm{M}_\odot)$',
        'mc_source':r'$\mathcal{M}^\mathrm{source}\,(\mathrm{M}_\odot)$',
        'redshift':r'$z$',
        'm1_source_maxldist':r'$m_{1}^\mathrm{source - maxLdist}\,(\mathrm{M}_\odot)$',
        'm2_source_maxldist':r'$m_{2}^\mathrm{source - maxLdist}\,(\mathrm{M}_\odot)$',
        'mtotal_source_maxldist':r'$M_\mathrm{total}^\mathrm{source - maxLdist}\,(\mathrm{M}_\odot)$',
        'mc_source_maxldist':r'$\mathcal{M}^\mathrm{source - maxLdist}\,(\mathrm{M}_\odot)$',
        'redshift_maxldist':r'$z^\mathrm{maxLdist}$',
        'mf':r'$M_\mathrm{final}\,(\mathrm{M}_\odot)$',
        'mf_evol':r'$M_\mathrm{final}^\mathrm{evol}\,(\mathrm{M}_\odot)$',
        'mf_nonevol':r'$M_\mathrm{final}^\mathrm{non-evol}\,(\mathrm{M}_\odot)$',
        'mf_source':r'$M_\mathrm{final}^\mathrm{source}\,(\mathrm{M}_\odot)$',
        'mf_source_evol':r'$M_\mathrm{final}^\mathrm{source, evol}\,(\mathrm{M}_\odot)$',
        'mf_source_nonevol':r'$M_\mathrm{final}^\mathrm{source, non-evol}\,(\mathrm{M}_\odot)$',
        'mf_source_maxldist':r'$M_\mathrm{final}^\mathrm{source - maxLdist}\,(\mathrm{M}_\odot)$',
        'mf_source_maxldist_evol':r'$M_\mathrm{final}^\mathrm{source, evol - maxLdist}\,(\mathrm{M}_\odot)$',
        'mf_source_maxldist_nonevol':r'$M_\mathrm{final}^\mathrm{source, non-evol - maxLdist}\,(\mathrm{M}_\odot)$',
        'af':r'$a_\mathrm{final}$',
        'af_evol':r'$a_\mathrm{final}^\mathrm{evol}$',
        'af_nonevol':r'$a_\mathrm{final}^\mathrm{non-evol}$',
        'afz':r'$a_{\mathrm{final}, z}$',
        'afz_evol':r'$a_{\mathrm{final}, z}^\mathrm{evol}$',
        'afz_nonevol':r'$a_{\mathrm{final}, z}^\mathrm{non-evol}$',
        'e_rad':r'$E_\mathrm{rad}\,(\mathrm{M}_\odot)$',
        'e_rad_evol':r'$E_\mathrm{rad}^\mathrm{evol}\,(\mathrm{M}_\odot)$',
        'e_rad_nonevol':r'$E_\mathrm{rad}^\mathrm{non-evol}\,(\mathrm{M}_\odot)$',
        'e_rad_maxldist':r'$E_\mathrm{rad}^\mathrm{maxLdist}\,(\mathrm{M}_\odot)$',
        'e_rad_maxldist_evol':r'$E_\mathrm{rad}^\mathrm{evol - maxLdist}\,(\mathrm{M}_\odot)$',
        'e_rad_maxldist_nonevol':r'$E_\mathrm{rad}^\mathrm{non-evol - maxLdist}\,(\mathrm{M}_\odot)$',
        'l_peak':r'$L_\mathrm{peak}\,(10^{56}\,\mathrm{ergs}\,\mathrm{s}^{-1})$',
        'l_peak_evol':r'$L_\mathrm{peak}^\mathrm{evol}\,(10^{56}\,\mathrm{ergs}\,\mathrm{s}^{-1})$',
        'l_peak_nonevol':r'$L_\mathrm{peak}^\mathrm{non-evol}\,(10^{56}\,\mathrm{ergs}\,\mathrm{s}^{-1})$',
        'spin1':r'$S_1$',
        'spin2':r'$S_2$',
        'a1':r'$a_1$',
        'a2':r'$a_2$',
        'a1z':r'$a_{1z}$',
        'a2z':r'$a_{2z}$',
        'theta1':r'$\theta_1\,(\mathrm{rad})$',
        'theta2':r'$\theta_2\,(\mathrm{rad})$',
        'phi1':r'$\phi_1\,(\mathrm{rad})$',
        'phi2':r'$\phi_2\,(\mathrm{rad})$',
        'chi_eff':r'$\chi_\mathrm{eff}$',
        'chi_tot':r'$\chi_\mathrm{total}$',
        'chi_p':r'$\chi_\mathrm{P}$',
        'tilt1':r'$t_1\,(\mathrm{rad})$',
        'tilt2':r'$t_2\,(\mathrm{rad})$',
        'tilt1_isco':r'$t_1^\mathrm{ISCO}\,(\mathrm{rad})$',
        'tilt2_isco':r'$t_2^\mathrm{ISCO}\,(\mathrm{rad})$',
        'costilt1':r'$\mathrm{cos}(t_1)$',
        'costilt2':r'$\mathrm{cos}(t_2)$',
        'iota':r'$\iota\,(\mathrm{rad})$',
        'cosiota':r'$\mathrm{cos}(\iota)$',
        'time':r'$t_\mathrm{c}\,(\mathrm{s})$',
        'time_mean':r'$<t>\,(\mathrm{s})$',
        'dist':r'$d_\mathrm{L}\,(\mathrm{Mpc})$',
        'distance_maxl':r'$d_\mathrm{L}^\mathrm{maxL}\,(\mathrm{Mpc})$',
        'ra':r'$\alpha$',
        'dec':r'$\delta$',
        'phase':r'$\phi\,(\mathrm{rad})$',
        'phase_maxl':r'$\phi^\mathrm{maxL}\,(\mathrm{rad})$',
        'psi':r'$\psi\,(\mathrm{rad})$',
        'theta_jn':r'$\theta_\mathrm{JN}\,(\mathrm{rad})$',
        'costheta_jn':r'$\mathrm{cos}(\theta_\mathrm{JN})$',
        'beta':r'$\beta\,(\mathrm{rad})$',
        'cosbeta':r'$\mathrm{cos}(\beta)$',
        'phi_jl':r'$\phi_\mathrm{JL}\,(\mathrm{rad})$',
        'phi12':r'$\phi_\mathrm{12}\,(\mathrm{rad})$',
        'phi12_isco':r'$\phi_\mathrm{12}^\mathrm{ISCO}\,(\mathrm{rad})$',
        'logl':r'$\mathrm{log}(\mathcal{L})$',
        'h1_end_time':r'$t_\mathrm{H}$',
        'l1_end_time':r'$t_\mathrm{L}$',
        'v1_end_time':r'$t_\mathrm{V}$',
        'h1l1_delay':r'$\Delta t_\mathrm{HL}$',
        'h1v1_delay':r'$\Delta t_\mathrm{HV}$',
        'l1v1_delay':r'$\Delta t_\mathrm{LV}$',
        'lambdat' : r'$\tilde{\Lambda}$',
        'dlambdat': r'$\delta \tilde{\Lambda}$',
        'lambda1' : r'$\lambda_1$',
        'lambda2': r'$\lambda_2$',
        'lam_tilde' : r'$\tilde{\Lambda}$',
        'dlam_tilde': r'$\delta \tilde{\Lambda}$',
        'logp1':r'$\log(p_1)$',
        'gamma1':r'$\Gamma_1$',
        'gamma2':r'$\Gamma_2$',
        'gamma3':r'$\Gamma_3$',
        'sdgamma0' : r'$\gamma_{0}$',
        'sdgamma1' : r'$\gamma_{1}$',
        'sdgamma2' : r'$\gamma_{2}$',
        'sdgamma3' : r'$\gamma_{3}$',
        'calamp_h1' : r'$\delta A_{H1}$',
        'calamp_l1' : r'$\delta A_{L1}$',
        'calpha_h1' : r'$\delta \phi_{H1}$',
        'calpha_l1' : r'$\delta \phi_{L1}$',
        'polar_eccentricity':r'$\epsilon_{polar}$',
        'polar_angle':r'$\alpha_{polar}$',
        'alpha':r'$\alpha_{polar}$',
        'dchiminus1':r'$d\chi_{-1}$',
        'dchiminus2':r'$d\chi_{-2}$',
        'dchi0':r'$d\chi_0$',
        'dchi1':r'$d\chi_1$',
        'dchi2':r'$d\chi_2$',
        'dchi3':r'$d\chi_3$',
        'dchi3S':r'$d\chi_{3S}$',
        'dchi3NS':r'$d\chi_{3NS}$',
        'dchi4':r'$d\chi_4$',
        'dchi4S':r'$d\chi_{4S}$',
        'dchi4NS':r'$d\chi_{4NS}$',
        'dchi5':r'$d\chi_5$',
        'dchi5S':r'$d\chi_{5S}$',
        'dchi5NS':r'$d\chi_{5NS}$',
        'dchi5l':r'$d\chi_{5l}$',
        'dchi5lS':r'$d\chi_{5lS}$',
        'dchi5lNS':r'$d\chi_{5lNS}$',
        'dchi6':r'$d\chi_6$',
        'dchi6S':r'$d\chi_{6S}$',
        'dchi6NS':r'$d\chi_{6NS}$',
        'dchi6l':r'$d\chi_{6l}$',
        'dchi7':r'$d\chi_7$',
        'dchi7S':r'$d\chi_{7S}$',
        'dchi7NS':r'$d\chi_{7NS}$',
        'dxi1':r'$d\xi_1$',
        'dxi2':r'$d\xi_2$',
        'dxi3':r'$d\xi_3$',
        'dxi4':r'$d\xi_4$',
        'dxi5':r'$d\xi_5$',
        'dxi6':r'$d\xi_6$',
        'dalpha1':r'$d\alpha_1$',
        'dalpha2':r'$d\alpha_2$',
        'dalpha3':r'$d\alpha_3$',
        'dalpha4':r'$d\alpha_4$',
        'dalpha5':r'$d\alpha_5$',
        'dbeta1':r'$d\beta_1$',
        'dbeta2':r'$d\beta_2$',
        'dbeta3':r'$d\beta_3$',
        'dsigma1':r'$d\sigma_1$',
        'dsigma2':r'$d\sigma_2$',
        'dsigma3':r'$d\sigma_3$',
        'dsigma4':r'$d\sigma_4$',
        'dquadmon1':r'$\delta\kappa_1$',
        'dquadmon2':r'$\delta\kappa_2$',
        'dquadmons':r'$\delta\kappa_s$',
        'dquadmona':r'$\delta\kappa_a$',
        'domega220':r'$d\omega_{220}$',
        'dtau220':r'$d\tau_{220}$',
        'domega210':r'$d\omega_{210}$',
        'dtau210':r'$d\tau_{210}$',
        'domega330':r'$d\omega_{330}$',
        'dtau330':r'$d\tau_{330}$',
        'domega440':r'$d\omega_{440}$',
        'dtau440':r'$d\tau_{440}$',
        'domega550':r'$d\omega_{550}$',
        'dtau550':r'$d\tau_{550}$',
        'damp21':r'$\delta A_{21}$',
        'damp33':r'$\delta A_{33}$',
        'optimal_snr':r'$\rho^{opt}$',
        'h1_optimal_snr':r'$\rho^{opt}_{H1}$',
        'l1_optimal_snr':r'$\rho^{opt}_{L1}$',
        'v1_optimal_snr':r'$\rho^{opt}_{V1}$',
        'matched_filter_snr':r'$\rho^{MF}$',
        'lambdas':r'$\Lambda_S$',
        'bluni':r'$BL_{uniform}$',
        'log10lambda_a':r'$\log\lambda_{\mathbb{A}} [\mathrm{m}]$',
        'log10lambda_eff':r'$\log\lambda_{eff} [\mathrm{m}]$',
        'lambda_eff':r'$\lambda_{eff} [\mathrm{m}]$',
        'lambda_a':r'$\lambda_{\mathbb{A}} [\mathrm{m}]$',
        'liv_amp':r'$\mathbb{A} [\mathrm{{eV}^{2-\alpha}}]$',
        'log10livamp':r'$\log \mathbb{A}[\mathrm{{eV}^{2-\alpha}}]$',
        'dchikappaS':r'$d\chi_{\kappa_{S}}$',
        'dchikappaA':r'$d\chi_{\kappa_{A}}$',
        'dchiMinus1':r'$d\chi_{-1}$',
        'dchiMinus2':r'$d\chi_{-2}$',
        'db1':r'$\delta b_1$',
        'db2':r'$\delta b_2$',
        'db3':r'$\delta b_3$',
        'db4':r'$\delta b_4$',
        'dc1':r'$\delta c_1$',
        'dc2':r'$\delta c_2$',
        'dc4':r'$\delta c_4$',
        'dcl':r'$\delta c_l$',
        }

    # Handle cases where multiple names have been used
    if param in m1_names:
        param = 'm1'
    elif param in m2_names:
        param = 'm2'
    elif param in mc_names:
        param = 'mc'
    elif param in eta_names:
        param = 'eta'
    elif param in q_names:
        param = 'q'
    elif param in iota_names:
        param = 'iota'
    elif param in dist_names:
        param = 'dist'
    elif param in ra_names:
        param = 'ra'
    elif param in dec_names:
        param = 'dec'
    elif param in phase_names:
        param = 'phase'

    try:
        label = labels[param]
    except KeyError:
        # Use simple string if no formatted label is available for param
        label = param

    return label
660
661#===============================================================================
662# Class definitions
663#===============================================================================
664
class PosteriorOneDPDF(object):
    """
    A data structure representing one parameter in a chain of posterior samples.
    The Posterior class generates instances of this class for pivoting onto a given
    parameter (the Posterior class is per-Sampler oriented whereas this class represents
    the same one parameter in successive samples in the chain).
    """
    def __init__(self,name,posterior_samples,injected_value=None,injFref=None,trigger_values=None,prior=None):
        """
        Create an instance of PosteriorOneDPDF based on a table of posterior_samples.

        @param name: A literal string name for the parameter.
        @param posterior_samples: A 1D array of the samples.
        @param injected_value: The injected or real value of the parameter.
        @param injFref: reference frequency for injection
        @param trigger_values: The trigger values of the parameter (dictionary w/ IFOs as keys).
        @param prior: The prior value corresponding to each sample.
        """
        self.__name=name
        self.__posterior_samples=np.array(posterior_samples)

        self.__injFref=injFref
        self.__injval=injected_value
        self.__trigvals=trigger_values
        self.__prior=prior

        return

    def __len__(self):
        """
        Container method. Defined as number of samples.
        """
        return len(self.__posterior_samples)

    def __getitem__(self,idx):
        """
        Container method. Returns posterior containing sample idx (allows slicing).
        """
        # BUGFIX: the original passed f_ref=self.__f_ref, but the constructor
        # keyword is injFref and the attribute is __injFref, so any indexing
        # raised an error. The prior is now propagated to the slice as well.
        return PosteriorOneDPDF(self.__name, self.__posterior_samples[idx],
                                injected_value=self.__injval,
                                injFref=self.__injFref,
                                trigger_values=self.__trigvals,
                                prior=self.__prior)

    @property
    def name(self):
        """
        Return the string literal name of the parameter.

        """
        return self.__name

    @property
    def mean(self):
        """
        Return the arithmetic mean for the marginal PDF on the parameter.

        """
        return np.mean(self.__posterior_samples)

    @property
    def median(self):
        """
        Return the median value for the marginal PDF on the parameter.

        """
        return np.median(self.__posterior_samples)

    @property
    def stdev(self):
        """
        Return the standard deviation of the marginal PDF on the parameter.

        """
        try:
            stdev = sqrt(np.var(self.__posterior_samples))
            if not np.isfinite(stdev):
                raise OverflowError
        except OverflowError:
            # Very large sample values can overflow np.var; rescale by the
            # mean, take the variance of the scaled samples, and undo.
            mean = np.mean(self.__posterior_samples)
            stdev = mean * sqrt(np.var(self.__posterior_samples/mean))
        return stdev

    @property
    def stacc(self):
        r"""
        Return the 'standard accuracy statistic' (stacc) of the marginal
        posterior of the parameter.

        stacc is a standard deviant incorporating information about the
        accuracy of the waveform recovery. Defined as the mean of the sum
        of the squared differences between the points in the PDF
        (x_i - sampled according to the posterior) and the true value
        (\f$x_{true}\f$). So for a marginalized one-dimensional PDF:
        \f$stacc = \sqrt{\frac{1}{N}\sum_{i=1}^N (x_i-x_{\rm true})2}\f$

        """
        if self.__injval is None:
            # Undefined without a known true value.
            return None
        else:
            return np.sqrt(np.mean((self.__posterior_samples - self.__injval)**2.0))

    @property
    def injval(self):
        """
        Return the injected value set at construction. If no value was set
        will return None.

        """
        return self.__injval

    @property
    def trigvals(self):
        """
        Return the trigger values set at construction. If no value was set
        will return None.

        """
        return self.__trigvals

    #@injval.setter #Python 2.6+
    def set_injval(self,new_injval):
        """
        Set the injected/real value of the parameter.

        @param new_injval: The injected/real value to set.
        """

        self.__injval=new_injval

    def set_trigvals(self,new_trigvals):
        """
        Set the trigger values of the parameter.

        @param new_trigvals: Dictionary containing trigger values with IFO keys.
        """

        self.__trigvals=new_trigvals

    @property
    def samples(self):
        """
        Return a 1D numpy.array of the samples.

        """
        return self.__posterior_samples

    def delete_samples_by_idx(self,samples):
        """
        Remove samples from posterior, analogous to numpy.delete but operates in place.

        @param samples: A list containing the indexes of the samples to be removed.
        """
        # NOTE: the reshape(-1,1) keeps the historical column-vector layout.
        self.__posterior_samples=np.delete(self.__posterior_samples,samples).reshape(-1,1)

    @property
    def gaussian_kde(self):
        """
        Return a SciPy gaussian_kde (representing a Gaussian KDE) of the samples.

        """
        from numpy import seterr as np_seterr
        np_seterr(under='ignore')
        try:
            from scipy import seterr as sp_seterr
            sp_seterr(under='ignore')
        except ImportError:
            # scipy.seterr was removed in modern SciPy; numpy's setting suffices.
            pass
        try:
            # stats.gaussian_kde is the supported name (the stats.kde alias
            # used previously is deprecated/removed in newer SciPy).
            return_value=stats.gaussian_kde(np.transpose(self.__posterior_samples))
        except Exception:
            # Dump the offending samples for post-mortem, then re-raise.
            exfile=open('exception.out','w')
            np.savetxt(exfile,self.__posterior_samples)
            exfile.close()
            raise

        return return_value

    @property
    def KL(self):
        """Returns the KL divergence between the prior and the posterior.
        It measures the relative information content in nats. The prior is
        looked up at run time via get_prior(); raises ValueError when no
        prior is known for this parameter.
        """
        from scipy.stats import entropy

        # Histogram the posterior and evaluate the prior at the bin centres,
        # so both arrays handed to entropy() have the same length.
        # BUGFIX: the original evaluated the prior at every *sample* (length N)
        # and compared it against a 36-bin histogram -- a shape mismatch.
        pdf, bin_edges = np.histogram(self.samples, bins=36, density=True)
        centers = 0.5 * (bin_edges[:-1] + bin_edges[1:])

        prior = get_prior(self.name)
        if prior is None:
            raise ValueError
        if prior == 'uniform':
            # Flat prior over the sampled range.
            prior_dist = np.ones_like(centers) / (np.max(self.samples) - np.min(self.samples))
        else:
            if 'x' in prior:
                # e.g. 'x**2': substitute the bin centres for x.
                # BUGFIX: the original discarded the result of str.replace().
                expression = prior.replace('x', 'centers')
            else:
                # e.g. 'np.cos'/'cos': call the numpy function on the centres.
                # BUGFIX: the original raised ValueError for priors already
                # spelled 'np.<func>' instead of evaluating them.
                func = prior if prior.startswith('np.') else 'np.' + prior
                expression = func + '(centers)'
            try:
                # eval() only ever sees the fixed strings from get_prior(),
                # never untrusted input.
                prior_dist = eval(expression)
            except Exception:
                raise ValueError

        return entropy(pdf, qk=prior_dist)

    def prob_interval(self,intervals):
        """
        Evaluate probability intervals.

        @param intervals: A list of the probability intervals [0-1]
        """
        list_of_ci=[]
        samples_temp=np.sort(np.squeeze(self.samples))

        for interval in intervals:
            if interval<1.0:
                N=np.size(samples_temp)
                # Index of the lower bound of the central credible interval.
                lower_idx=int(floor((N/2.0)*(1-interval)))
                if lower_idx<0:
                    lower_idx=0
                # Index of the upper bound of the central credible interval.
                upper_idx=N-int(floor((N/2.0)*(1-interval)))
                # BUGFIX: clamp with >=; upper_idx == N slipped through the
                # old '> N' check and indexed one past the end of the array.
                if upper_idx>=N:
                    upper_idx=N-1

                list_of_ci.append((float(samples_temp[lower_idx]),float(samples_temp[upper_idx])))
            else:
                # A >= 100% credible interval is undefined.
                list_of_ci.append((None,None))

        return list_of_ci
898
899class Posterior(object):
900 """
901 Data structure for a table of posterior samples .
902 """
    def __init__(self,commonResultsFormatData,SimInspiralTableEntry=None,inj_spin_frame='OrbitalL', injFref=100,SnglInspiralList=None,name=None,description=None):
        """
        Constructor.

        Builds one PosteriorOneDPDF per column of the input table, wires up
        the injection/trigger lookups, and locates the log-likelihood and
        log-prior chains among the columns.

        @param commonResultsFormatData: A 2D array containing the posterior
        samples and related data. The samples chains form the columns.
        @param SimInspiralTableEntry: A SimInspiralTable row containing the injected values.
        @param SnglInspiralList: A list of SnglInspiral objects containing the triggers.
        @param inj_spin_frame: spin frame
        @param injFref: reference frequency
        @param name: optional name
        @param description: optional description

        """
        common_output_table_header,common_output_table_raw =commonResultsFormatData
        self._posterior={}
        self._injFref=injFref
        self._injection=SimInspiralTableEntry

        self._triggers=SnglInspiralList
        # Column names that may hold the log-likelihood / log-prior chains.
        self._loglaliases=['deltalogl', 'posterior', 'logl','logL','likelihood']
        self._logpaliases=['logp', 'logP','prior','logprior','Prior','logPrior']

        # All parameter lookups below are case-insensitive.
        common_output_table_header=[i.lower() for i in common_output_table_header]

        # Define XML mapping
        # Maps a posterior parameter name onto either a callable that extracts
        # the value from a SimInspiralTable row, or (rarely) a plain value.
        self._injXMLFuncMap={
            'mchirp':lambda inj:inj.mchirp,
            'chirpmass':lambda inj:inj.mchirp,
            'mc':lambda inj:inj.mchirp,
            'mass1':lambda inj:inj.mass1,
            'm1':lambda inj:inj.mass1,
            'mass2':lambda inj:inj.mass2,
            'm2':lambda inj:inj.mass2,
            'mtotal':lambda inj:float(inj.mass1)+float(inj.mass2),
            'eta':lambda inj:inj.eta,
            'q':self._inj_q,
            'asym_massratio':self._inj_q,
            'massratio':lambda inj:inj.eta,
            'sym_massratio':lambda inj:inj.eta,
            'time': lambda inj:float(get_end(inj)),
            'time_mean': lambda inj:float(get_end(inj)),
            'end_time': lambda inj:float(get_end(inj)),
            'phi0':lambda inj:inj.phi0,
            'phi_orb': lambda inj: inj.coa_phase,
            'phase': lambda inj: inj.coa_phase,
            'dist':lambda inj:inj.distance,
            'distance':lambda inj:inj.distance,
            'ra':self._inj_longitude,
            'rightascension':self._inj_longitude,
            'long':self._inj_longitude,
            'longitude':self._inj_longitude,
            'dec':lambda inj:inj.latitude,
            'declination':lambda inj:inj.latitude,
            'lat':lambda inj:inj.latitude,
            'latitude':lambda inj:inj.latitude,
            # psi is folded into [0, pi) to match the sampler's convention.
            'psi': lambda inj: np.mod(inj.polarization, np.pi),
            'f_ref': lambda inj: self._injFref,
            'polarisation':lambda inj:inj.polarization,
            'polarization':lambda inj:inj.polarization,
            'h1_end_time':lambda inj:det_end_time('H', inj),
            'l1_end_time':lambda inj:det_end_time('L', inj),
            'v1_end_time':lambda inj:det_end_time('V', inj),
            'lal_amporder':lambda inj:inj.amp_order}

        # Add on all spin parameterizations
        for key, val in self._inj_spins(self._injection, frame=inj_spin_frame).items():
            self._injXMLFuncMap[key] = val

        # One PosteriorOneDPDF per column of the raw sample table.
        for one_d_posterior_samples,param_name in zip(np.hsplit(common_output_table_raw,common_output_table_raw.shape[1]),common_output_table_header):

            self._posterior[param_name]=PosteriorOneDPDF(param_name.lower(),one_d_posterior_samples,injected_value=self._getinjpar(param_name),injFref=self._injFref,trigger_values=self._gettrigpar(param_name))

        # Derive component masses when only (mchirp, eta) were sampled.
        if 'mchirp' in common_output_table_header and 'eta' in common_output_table_header \
        and (not 'm1' in common_output_table_header) and (not 'm2' in common_output_table_header):
            try:
                print('Inferring m1 and m2 from mchirp and eta')
                (m1,m2)=mc2ms(self._posterior['mchirp'].samples, self._posterior['eta'].samples)
                self._posterior['m1']=PosteriorOneDPDF('m1',m1,injected_value=self._getinjpar('m1'),trigger_values=self._gettrigpar('m1'))
                self._posterior['m2']=PosteriorOneDPDF('m2',m2,injected_value=self._getinjpar('m2'),trigger_values=self._gettrigpar('m2'))
            except KeyError:
                print('Unable to deduce m1 and m2 from input columns')


        logLFound=False

        # Locate the log-likelihood chain under any of its known aliases;
        # the last matching alias wins.
        for loglalias in self._loglaliases:

            if loglalias in common_output_table_header:
                try:
                    self._logL=self._posterior[loglalias].samples
                except KeyError:
                    print("No '%s' column in input table!"%loglalias)
                    continue
                logLFound=True

        if not logLFound:
            raise RuntimeError("No likelihood/posterior values found!")
        self._logP=None

        # Same for the (optional) log-prior chain; non-log aliases are
        # converted to log values.
        for logpalias in self._logpaliases:
            if logpalias in common_output_table_header:
                try:
                    self._logP=self._posterior[logpalias].samples
                except KeyError:
                    print("No '%s' column in input table!"%logpalias)
                    continue
                if not 'log' in logpalias:
                    self._logP=[np.log(i) for i in self._logP]

        # NOTE(review): __name/__description are only bound when provided, so
        # the corresponding properties raise AttributeError otherwise — verify
        # callers always pass them when they later read .name/.description.
        if name is not None:
            self.__name=name

        if description is not None:
            self.__description=description

        return
1020
    def extend_posterior(self):
        """
        Add some useful derived parameters (such as tilt angles, time delays, etc) in the Posterior object.

        Operates in place on self: component masses, spin angles and
        magnitudes, effective spins, per-detector end times and time delays,
        redshift, source-frame masses, Lorentz-invariance-violation
        quantities, tidal parameters, and (for BBH runs) final mass/spin,
        peak luminosity and radiated energy are appended when their input
        parameters are present.
        """
        injection=self._injection
        pos=self
        # Generate component mass posterior samples (if they didnt exist already)
        if 'mc' in pos.names:
            mchirp_name = 'mc'
        elif 'chirpmass' in pos.names:
            mchirp_name = 'chirpmass'
        else:
            mchirp_name = 'mchirp'

        if 'asym_massratio' in pos.names:
            q_name = 'asym_massratio'
        else:
            q_name = 'q'

        if 'sym_massratio' in pos.names:
            eta_name= 'sym_massratio'
        elif 'massratio' in pos.names:
            eta_name= 'massratio'
        else:
            eta_name='eta'

        if 'mass1' in pos.names and 'mass2' in pos.names:
            pos.append_mapping(('m1','m2'), lambda x,y:(x,y), ('mass1','mass2'))

        # NOTE(review): the 'or' below fires the mapping whenever *either*
        # alias of a component mass is missing; an 'and' would only fire when
        # both are missing — confirm the intent.
        if (mchirp_name in pos.names and eta_name in pos.names) and \
        ('mass1' not in pos.names or 'm1' not in pos.names) and \
        ('mass2' not in pos.names or 'm2' not in pos.names):

            pos.append_mapping(('m1','m2'),mc2ms,(mchirp_name,eta_name))

        if (mchirp_name in pos.names and q_name in pos.names) and \
        ('mass1' not in pos.names or 'm1' not in pos.names) and \
        ('mass2' not in pos.names or 'm2' not in pos.names):

            pos.append_mapping(('m1','m2'),q2ms,(mchirp_name,q_name))
            pos.append_mapping('eta',q2eta,q_name)

        if ('m1' in pos.names and 'm2' in pos.names and not 'mtotal' in pos.names ):
            pos.append_mapping('mtotal', lambda m1,m2: m1+m2, ('m1','m2') )

        # Aliases for the legacy spin parameter names.
        if('a_spin1' in pos.names): pos.append_mapping('a1',lambda a:a,'a_spin1')
        if('a_spin2' in pos.names): pos.append_mapping('a2',lambda a:a,'a_spin2')
        if('phi_spin1' in pos.names): pos.append_mapping('phi1',lambda a:a,'phi_spin1')
        if('phi_spin2' in pos.names): pos.append_mapping('phi2',lambda a:a,'phi_spin2')
        if('theta_spin1' in pos.names): pos.append_mapping('theta1',lambda a:a,'theta_spin1')
        if('theta_spin2' in pos.names): pos.append_mapping('theta2',lambda a:a,'theta_spin2')

        # Relative phase of the complex SNR between each detector pair,
        # wrapped into (-pi, pi].
        my_ifos=['h1','l1','v1']
        for ifo1,ifo2 in combinations(my_ifos,2):
            p1=ifo1+'_cplx_snr_arg'
            p2=ifo2+'_cplx_snr_arg'
            if p1 in pos.names and p2 in pos.names:
                delta=np.mod(pos[p1].samples - pos[p2].samples + np.pi ,2.0*np.pi)-np.pi
                pos.append(PosteriorOneDPDF(ifo1+ifo2+'_relative_phase',delta))

        # Ensure that both theta_jn and inclination are output for runs
        # with zero tilt (for runs with tilt, this will be taken care of
        # below when the old spin angles are computed as functions of the
        # new ones
        # Disabled this since the parameters are degenerate and causing problems
        #if ('theta_jn' in pos.names) and (not 'tilt1' in pos.names) and (not 'tilt2' in pos.names):
        #    pos.append_mapping('iota', lambda t:t, 'theta_jn')

        # Compute time delays from sky position
        try:
            if ('ra' in pos.names or 'rightascension' in pos.names) \
            and ('declination' in pos.names or 'dec' in pos.names) \
            and 'time' in pos.names:
                from lal import LIGOTimeGPS, TimeDelayFromEarthCenter
                from numpy import array
                detMap = {'H1': 'LHO_4k', 'H2': 'LHO_2k', 'L1': 'LLO_4k',
                          'G1': 'GEO_600', 'V1': 'VIRGO', 'T1': 'TAMA_300'}
                if 'ra' in pos.names:
                    ra_name='ra'
                else: ra_name='rightascension'
                if 'dec' in pos.names:
                    dec_name='dec'
                else: dec_name='declination'
                ifo_times={}
                my_ifos=['H1','L1','V1']
                for ifo in my_ifos:
                    inj_time=None
                    if injection:
                        inj_time = det_end_time(ifo, injection)
                    location = lal.cached_detector_by_prefix[ifo].location
                    # Per-sample arrival time at the detector: geocentre time
                    # plus the sky-position-dependent travel-time delay.
                    ifo_times[ifo]=array(list(map(lambda ra,dec,time: array([time[0]+TimeDelayFromEarthCenter(location,ra[0],dec[0],LIGOTimeGPS(float(time[0])))]), pos[ra_name].samples,pos[dec_name].samples,pos['time'].samples)))
                    loc_end_time=PosteriorOneDPDF(ifo.lower()+'_end_time',ifo_times[ifo],injected_value=inj_time)
                    pos.append(loc_end_time)
                for ifo1 in my_ifos:
                    for ifo2 in my_ifos:
                        if ifo1==ifo2: continue
                        delay_time=ifo_times[ifo2]-ifo_times[ifo1]
                        if injection:
                            inj_delay=float(det_end_time(ifo2, injection)-det_end_time(ifo1, injection))
                        else:
                            inj_delay=None
                        time_delay=PosteriorOneDPDF(ifo1.lower()+ifo2.lower()+'_delay',delay_time,inj_delay)
                        pos.append(time_delay)
        except ImportError:
            print('Warning: Could not import lal python bindings, check you ./configured with --enable-swig-python')
            print('This means I cannot calculate time delays')

        #Calculate new spin angles
        new_spin_params = ['tilt1','tilt2','theta_jn','beta']
        if not set(new_spin_params).issubset(set(pos.names)):
            old_params = ['f_ref',mchirp_name,'eta','iota','a1','theta1','phi1']
            if 'a2' in pos.names: old_params += ['a2','theta2','phi2']
            try:
                pos.append_mapping(new_spin_params, spin_angles, old_params)
            except KeyError:
                print("Warning: Cannot find spin parameters. Skipping spin angle calculations.")

        #Store signed spin magnitudes in separate parameters and make a1,a2 magnitudes
        if 'a1' in pos.names:
            if 'tilt1' in pos.names:
                pos.append_mapping('a1z', lambda a, tilt: a*np.cos(tilt), ('a1','tilt1'))
            else:
                # Aligned-spin run: a1 is signed; keep the signed value as a1z
                # and replace a1 by its magnitude.
                pos.append_mapping('a1z', lambda x: x, 'a1')
                inj_az = None
                if injection is not None:
                    inj_az = injection.spin1z
                pos['a1z'].set_injval(inj_az)
                pos.pop('a1')
                pos.append_mapping('a1', lambda x: np.abs(x), 'a1z')

        if 'a2' in pos.names:
            if 'tilt2' in pos.names:
                pos.append_mapping('a2z', lambda a, tilt: a*np.cos(tilt), ('a2','tilt2'))
            else:
                pos.append_mapping('a2z', lambda x: x, 'a2')
                inj_az = None
                if injection is not None:
                    inj_az = injection.spin2z
                pos['a2z'].set_injval(inj_az)
                pos.pop('a2')
                pos.append_mapping('a2', lambda x: np.abs(x), 'a2z')

        #Calculate effective spin parallel to L
        if ('m1' in pos.names and 'a1z' in pos.names) and ('m2' in pos.names and 'a2z' in pos.names):
            pos.append_mapping('chi_eff', lambda m1,a1z,m2,a2z: (m1*a1z + m2*a2z) / (m1 + m2), ('m1','a1z','m2','a2z'))

        #If precessing spins calculate total effective spin
        if ('m1' in pos.names and 'a1' in pos.names and 'tilt1' in pos.names) and ('m2' in pos.names and 'a2' in pos.names and 'tilt2' in pos.names):
            pos.append_mapping('chi_tot', lambda m1,a1,m2,a2: (m1*a1 + m2*a2) / (m1 + m2), ('m1','a1','m2','a2'))

        #Calculate effective precessing spin magnitude
        if ('m1' in pos.names and 'a1' in pos.names and 'tilt1' in pos.names) and ('m2' in pos.names and 'a2' in pos.names and 'tilt2' in pos.names):
            pos.append_mapping('chi_p', chi_precessing, ['m1', 'a1', 'tilt1', 'm2', 'a2', 'tilt2'])

        # Calculate redshift from luminosity distance measurements
        if('distance' in pos.names):
            pos.append_mapping('redshift', calculate_redshift, 'distance')
        elif('dist' in pos.names):
            pos.append_mapping('redshift', calculate_redshift, 'dist')
        # If using the DistanceMarginalisation, compute the maxL redshift distribution from the maxL d_L
        elif('distance_maxl' in pos.names):
            pos.append_mapping('redshift_maxldist', calculate_redshift, 'distance_maxl')

        # Calculate source mass parameters
        if ('m1' in pos.names) and ('redshift' in pos.names):
            pos.append_mapping('m1_source', source_mass, ['m1', 'redshift'])

        if ('m2' in pos.names) and ('redshift' in pos.names):
            pos.append_mapping('m2_source', source_mass, ['m2', 'redshift'])

        if ('mtotal' in pos.names) and ('redshift' in pos.names):
            pos.append_mapping('mtotal_source', source_mass, ['mtotal', 'redshift'])

        if ('mc' in pos.names) and ('redshift' in pos.names):
            pos.append_mapping('mc_source', source_mass, ['mc', 'redshift'])

        # Calculate source mass parameters if DistanceMarginalisation was used, using the maxL distance and redshift
        if ('m1' in pos.names) and ('redshift_maxldist' in pos.names):
            pos.append_mapping('m1_source_maxldist', source_mass, ['m1', 'redshift_maxldist'])

        if ('m2' in pos.names) and ('redshift_maxldist' in pos.names):
            pos.append_mapping('m2_source_maxldist', source_mass, ['m2', 'redshift_maxldist'])

        if ('mtotal' in pos.names) and ('redshift_maxldist' in pos.names):
            pos.append_mapping('mtotal_source_maxldist', source_mass, ['mtotal', 'redshift_maxldist'])

        if ('mc' in pos.names) and ('redshift_maxldist' in pos.names):
            pos.append_mapping('mc_source_maxldist', source_mass, ['mc', 'redshift_maxldist'])

        # Calling functions testing Lorentz invariance violation
        if ('log10lambda_eff' in pos.names) and ('redshift' in pos.names):
            pos.append_mapping('log10lambda_a', lambda z,nonGR_alpha,wl,dist:np.log10(lambda_a(z, nonGR_alpha, 10**wl, dist)), ['redshift', 'nonGR_alpha', 'log10lambda_eff', 'dist'])
        if ('log10lambda_eff' in pos.names) and ('redshift' in pos.names):
            pos.append_mapping('log10livamp', lambda z,nonGR_alpha,wl,dist:np.log10(amplitudeMeasure(z, nonGR_alpha, 10**wl, dist)), ['redshift','nonGR_alpha','log10lambda_eff', 'dist'])
        if ('lambda_eff' in pos.names) and ('redshift' in pos.names):
            pos.append_mapping('lambda_a', lambda_a, ['redshift', 'nonGR_alpha', 'log10lambda_eff', 'dist'])
        if ('lambda_eff' in pos.names) and ('redshift' in pos.names):
            # NOTE(review): these two branches test 'lambda_eff' but read
            # 'log10lambda_eff' / 'lambda_eff' — confirm the parameter lists.
            pos.append_mapping('liv_amp', amplitudeMeasure, ['redshift','nonGR_alpha','lambda_eff', 'dist'])

        #Calculate new tidal parameters
        new_tidal_params = ['lam_tilde','dlam_tilde']
        old_tidal_params = ['lambda1','lambda2','q']
        if 'lambda1' in pos.names or 'lambda2' in pos.names:
            try:
                pos.append_mapping(new_tidal_params, symm_tidal_params, old_tidal_params)
            except KeyError:
                print("Warning: Cannot find tidal parameters. Skipping tidal calculations.")

        #If new spin params present, calculate old ones
        old_spin_params = ['iota', 'theta1', 'phi1', 'theta2', 'phi2', 'beta']
        new_spin_params = ['theta_jn', 'phi_jl', 'tilt1', 'tilt2', 'phi12', 'a1', 'a2', 'm1', 'm2', 'f_ref','phase']
        try:
            # A zero reference frequency means the spins were defined at the
            # low-frequency cutoff instead; substitute that column if present.
            if pos['f_ref'].samples[0][0]==0.0:
                for name in ['flow','f_lower']:
                    if name in pos.names:
                        new_spin_params = ['theta_jn', 'phi_jl', 'tilt1', 'tilt2', 'phi12', 'a1', 'a2', 'm1', 'm2', name]
        except:
            # NOTE(review): bare except — any failure (not just a missing
            # f_ref column) lands here.
            print("No f_ref for SimInspiralTransformPrecessingNewInitialConditions().")
        if set(new_spin_params).issubset(set(pos.names)) and not set(old_spin_params).issubset(set(pos.names)):
            pos.append_mapping(old_spin_params, physical2radiationFrame, new_spin_params)

        #Calculate spin magnitudes for aligned runs
        if 'spin1' in pos.names:
            inj_a1 = inj_a2 = None
            if injection:
                inj_a1 = sqrt(injection.spin1x*injection.spin1x + injection.spin1y*injection.spin1y + injection.spin1z*injection.spin1z)
                inj_a2 = sqrt(injection.spin2x*injection.spin2x + injection.spin2y*injection.spin2y + injection.spin2z*injection.spin2z)

            try:
                a1_samps = abs(pos['spin1'].samples)
                a1_pos = PosteriorOneDPDF('a1',a1_samps,injected_value=inj_a1)
                pos.append(a1_pos)
            except KeyError:
                print("Warning: problem accessing spin1 values.")

            try:
                a2_samps = abs(pos['spin2'].samples)
                a2_pos = PosteriorOneDPDF('a2',a2_samps,injected_value=inj_a2)
                pos.append(a2_pos)
            except KeyError:
                print("Warning: no spin2 values found.")

        # For BBHs: Calculate mass and spin of final merged system, radiated energy, and peak luminosity in GWs

        # Only apply fits if this is a BBH run (with no tidal parameters)

        if len(np.intersect1d(pos.names,tidalParams)) == 0:

            # Set fits to consider (and average over)

            FinalSpinFits = ['HBR2016', 'UIB2016', 'HL2016']
            FinalMassFits = ['UIB2016', 'HL2016']
            LpeakFits = ['UIB2016', 'HL2016']

            # If evolved spin angle samples are present, use those to compute the final mass and spin, peak luminosity, and radiated energy; also, use the _evol suffix in the aligned-spin case, since here the spin angles are trivially evolved

            spin_angle_suffix = ''
            evol_suffix = '_evol'

            if all([x in pos.names for x in ['tilt1_isco','tilt2_isco','phi12_isco']]):
                spin_angle_suffix = '_isco'
            elif all([x in pos.names for x in ['tilt1','tilt2','phi12']]):
                evol_suffix = '_nonevol'

            zero_vec = np.array([0.])

            tilt1_name = 'tilt1' + spin_angle_suffix
            tilt2_name = 'tilt2' + spin_angle_suffix
            phi12_name = 'phi12' + spin_angle_suffix
            mf_name = 'mf' + evol_suffix
            mf_source_name = 'mf_source' + evol_suffix
            mf_source_maxldist_name = 'mf_source_maxldist' + evol_suffix

            if ('m1' in pos.names) and ('m2' in pos.names):
                if ('a1' in pos.names) and ('a2' in pos.names):
                    if (tilt1_name in pos.names) and (tilt2_name in pos.names) and (phi12_name in pos.names):
                        # Precessing case
                        print("Using averages of fit formulae for final mass, final spin, and peak luminosity (on masses and 3D spins).")
                        if evol_suffix == '_evol':
                            print("Applying these to *_isco evolved spin samples and outputting *_evol samples.")
                        else:
                            print("Applying these to unevolved spin samples and outputting *_nonevol samples.")
                        print("Final mass fits:", FinalMassFits, "; Final spin fits:", FinalSpinFits, "; Peak luminosity fits:", LpeakFits)
                        try:
                            pos.append_mapping('af' + evol_suffix, lambda m1, m2, chi1, chi2, tilt1, tilt2, phi12: bbh_average_fits_precessing(m1, m2, chi1, chi2, tilt1, tilt2, phi12, 'af', FinalSpinFits), ['m1', 'm2', 'a1', 'a2', tilt1_name, tilt2_name, phi12_name])
                        except Exception as e:
                            print("Could not calculate %s. The error was: %s"%('af' + evol_suffix, str(e)))
                        try:
                            pos.append_mapping('afz' + evol_suffix, lambda m1, m2, chi1, chi2, tilt1, tilt2: bbh_average_fits_precessing(m1, m2, chi1, chi2, tilt1, tilt2, zero_vec, 'afz', FinalSpinFits), ['m1', 'm2', 'a1', 'a2', tilt1_name, tilt2_name])
                        except Exception as e:
                            print("Could not calculate %s. The error was: %s"%('afz' + evol_suffix, str(e)))
                        try:
                            pos.append_mapping(mf_name, lambda m1, m2, chi1, chi2, tilt1, tilt2: bbh_average_fits_precessing(m1, m2, chi1, chi2, tilt1, tilt2, zero_vec, 'Mf', FinalMassFits), ['m1', 'm2', 'a1', 'a2', tilt1_name, tilt2_name])
                        except Exception as e:
                            print("Could not calculate %s. The error was: %s"%(mf_name, str(e)))
                        try:
                            pos.append_mapping('l_peak' + evol_suffix, lambda m1, m2, chi1, chi2, tilt1, tilt2: bbh_average_fits_precessing(m1, m2, chi1, chi2, tilt1, tilt2, zero_vec, 'Lpeak', LpeakFits), ['m1', 'm2', 'a1', 'a2', tilt1_name, tilt2_name])
                        except Exception as e:
                            print("Could not calculate %s. The error was: %s"%('l_peak' + evol_suffix, str(e)))
                    elif ('a1z' in pos.names) and ('a2z' in pos.names):
                        # Aligned-spin case
                        print("Using averages for final mass, final spin, and peak luminosity (on masses and projected spin components).")
                        print("Outputting *_evol samples because spin evolution is trivial in this nonprecessing case.")
                        print("Final mass fits:", FinalMassFits, "; Final spin fits:", FinalSpinFits, "; Peak luminosity fits:", LpeakFits)
                        try:
                            # Compute absolute values of spins and compute tilt angles to allow for negative spin values
                            pos.append_mapping('afz_evol', lambda m1, m2, chi1, chi2: bbh_average_fits_precessing(m1, m2, abs(chi1), abs(chi2), 0.5*np.pi*(1. - np.sign(chi1)), 0.5*np.pi*(1. - np.sign(chi2)), zero_vec, 'afz', FinalSpinFits), ['m1', 'm2', 'a1z', 'a2z'])
                        except Exception as e:
                            print("Could not calculate afz_evol. The error was: %s"%(str(e)))
                        try:
                            pos.append_mapping('af_evol', lambda a: abs(a), 'afz_evol')
                        except Exception as e:
                            print("Could not calculate af_evol. The error was: %s"%(str(e)))
                        try:
                            pos.append_mapping('mf_evol', lambda m1, m2, chi1, chi2: bbh_average_fits_precessing(m1, m2, abs(chi1), abs(chi2), 0.5*np.pi*(1. - np.sign(chi1)), 0.5*np.pi*(1. - np.sign(chi2)), zero_vec, 'Mf', FinalMassFits), ['m1', 'm2', 'a1z', 'a2z'])
                        except Exception as e:
                            print("Could not calculate mf_evol. The error was: %s"%(str(e)))
                        try:
                            pos.append_mapping('l_peak_evol', lambda m1, m2, chi1, chi2: bbh_average_fits_precessing(m1, m2, abs(chi1), abs(chi2), 0.5*np.pi*(1. - np.sign(chi1)), 0.5*np.pi*(1. - np.sign(chi2)), zero_vec, 'Lpeak', LpeakFits), ['m1', 'm2', 'a1z', 'a2z'])
                        except Exception as e:
                            print("Could not calculate l_peak_evol. The error was: %s"%(str(e)))
                    else:
                        print("Could not calculate final parameters or Lpeak. Found samples for a1 and a2 but not for tilt angles and phi12 or spin components (a1z and a2z).")
                else:
                    # Nonspinning case
                    print("Using averages of fit formulae for final mass, final spin, and peak luminosity (on masses and zero spins).")
                    print("Outputting *_evol samples because spin evolution is trivial in this nonspinning case.")
                    print("Final mass fits:", FinalMassFits, "; Final spin fits:", FinalSpinFits, "; Peak luminosity fits:", LpeakFits)
                    try:
                        pos.append_mapping('afz_evol', lambda m1, m2: bbh_average_fits_precessing(m1, m2, zero_vec, zero_vec, zero_vec, zero_vec, zero_vec, 'afz', FinalSpinFits), ['m1', 'm2'])
                    except Exception as e:
                        print("Could not calculate afz_evol. The error was: %s"%(str(e)))
                    try:
                        pos.append_mapping('af_evol', lambda a: abs(a), 'afz_evol')
                    except Exception as e:
                        print("Could not calculate af_evol. The error was: %s"%(str(e)))
                    try:
                        pos.append_mapping('mf_evol', lambda m1, m2: bbh_average_fits_precessing(m1, m2, zero_vec, zero_vec, zero_vec, zero_vec, zero_vec, 'Mf', FinalMassFits), ['m1', 'm2'])
                    except Exception as e:
                        print("Could not calculate mf_evol. The error was: %s"%(str(e)))
                    try:
                        pos.append_mapping('l_peak_evol', lambda m1, m2: bbh_average_fits_precessing(m1, m2, zero_vec, zero_vec, zero_vec, zero_vec, zero_vec, 'Lpeak', LpeakFits), ['m1', 'm2'])
                    except Exception as e:
                        print("Could not calculate l_peak_evol. The error was: %s"%(str(e)))

            # Convert final mass to source frame
            if (mf_name in pos.names) and ('redshift' in pos.names):
                try:
                    pos.append_mapping(mf_source_name, source_mass, [mf_name, 'redshift'])
                except Exception as e:
                    print("Could not calculate final source frame mass. The error was: %s"%(str(e)))

            if (mf_name in pos.names) and ('redshift_maxldist' in pos.names):
                try:
                    pos.append_mapping(mf_source_maxldist_name, source_mass, [mf_name, 'redshift_maxldist'])
                except Exception as e:
                    print("Could not calculate final source frame mass using maxldist redshift. The error was: %s"%(str(e)))

            # Calculate radiated energy
            if ('mtotal_source' in pos.names) and (mf_source_name in pos.names):
                try:
                    pos.append_mapping('e_rad' + evol_suffix, lambda mtot_s, mf_s: mtot_s-mf_s, ['mtotal_source', mf_source_name])
                except Exception as e:
                    print("Could not calculate radiated energy. The error was: %s"%(str(e)))

            if ('mtotal_source_maxldist' in pos.names) and (mf_source_maxldist_name in pos.names):
                try:
                    pos.append_mapping('e_rad_maxldist' + evol_suffix, lambda mtot_s, mf_s: mtot_s-mf_s, ['mtotal_source_maxldist', mf_source_maxldist_name])
                except Exception as e:
                    print("Could not calculate radiated energy using maxldist redshift results. The error was: %s"%(str(e)))
1391
1392 def bootstrap(self):
1393 """
1394 Returns a new Posterior object that contains a bootstrap
1395 sample of self.
1396
1397 """
1398 names=[]
1399 samples=[]
1400 for name,oneDpos in self._posterior.items():
1401 names.append(name)
1402 samples.append(oneDpos.samples)
1403
1404 samplesBlock=np.hstack(samples)
1405
1406 bootstrapSamples=samplesBlock[:,:]
1407 Nsamp=bootstrapSamples.shape[0]
1408
1409 rows=np.vsplit(samplesBlock,Nsamp)
1410
1411 for i in range(Nsamp):
1412 bootstrapSamples[i,:]=random.choice(rows)
1413
1414 return Posterior((names,bootstrapSamples),self._injection,self._triggers)
1415
1416 def delete_samples_by_idx(self,samples):
1417 """
1418 Remove samples from all OneDPosteriors.
1419
1420 @param samples: The indexes of the samples to be removed.
1421 """
1422 for name,pos in self:
1423 pos.delete_samples_by_idx(samples)
1424 return
1425
1426 def delete_NaN_entries(self,param_list):
1427 """
1428 Remove samples containing NaN in request params.
1429
1430 @param param_list: The parameters to be checked for NaNs.
1431 """
1432 nan_idxs = np.array(())
1433 nan_dict = {}
1434 for param in param_list:
1435 nan_bool_array = np.isnan(self[param].samples).any(1)
1436 idxs = np.where(nan_bool_array == True)[0]
1437 if len(idxs) > 0:
1438 nan_dict[param]=len(idxs)
1439 nan_idxs = np.append(nan_idxs, idxs)
1440 total_samps = len(self)
1441 nan_samps = len(nan_idxs)
1442 if nan_samps != 0:
1443 print("WARNING: removing %i of %i total samples due to NaNs:"% (nan_samps,total_samps))
1444 for param in nan_dict.keys():
1445 print("\t%i NaNs in %s."%(nan_dict[param],param))
1446 self.delete_samples_by_idx(nan_idxs)
1447 return
1448
1449 @property
1450 def DIC(self):
1451 """Returns the Deviance Information Criterion estimated from the
1452 posterior samples. The DIC is defined as -2*(<log(L)> -
1453 Var(log(L))); smaller values are "better."
1454
1455 """
1456
1457 return -2.0*(np.mean(self._logL) - np.var(self._logL))
1458
1459 @property
1460 def injection(self):
1461 """
1462 Return the injected values.
1463
1464 """
1465
1466 return self._injection
1467
1468 @property
1469 def triggers(self):
1470 """
1471 Return the trigger values .
1472
1473 """
1474
1475 return self._triggers
1476
1477 def _total_incl_restarts(self, samples):
1478 total=0
1479 last=samples[0]
1480 for x in samples[1:]:
1481 if x < last:
1482 total += last
1483 last = x
1484 total += samples[-1]
1485 return total
1486
    def longest_chain_cycles(self):
        """
        Returns the number of cycles in the longest chain.

        Requires the raw sample table to contain a 'cycle' column; raises
        RuntimeError otherwise.  When a 'chain' column is present the maximum
        is taken over the individual chains, accounting for restarts (cycle
        counters that reset to a lower value).
        """
        # NOTE(review): self.samples is *called* here, but bySample() reads it
        # as an attribute — one of the two usages presumably predates an API
        # change; confirm which form the current accessor takes.
        samps,header=self.samples()
        header=header.split()
        if not ('cycle' in header):
            raise RuntimeError("Cannot compute number of cycles in longest chain")

        cycle_col=header.index('cycle')
        if 'chain' in header:
            # Total the cycles per chain (including restarts), keep the largest.
            chain_col=header.index('chain')
            chain_indexes=np.unique(samps[:,chain_col])
            max_cycle=0
            for ind in chain_indexes:
                chain_cycle_samps=samps[ samps[:,chain_col] == ind, cycle_col ]
                max_cycle=max(max_cycle, self._total_incl_restarts(chain_cycle_samps))
            return int(max_cycle)
        else:
            return int(self._total_incl_restarts(samps[:,cycle_col]))
1508
1509 #@injection.setter #Python 2.6+
1510 def set_injection(self,injection):
1511 """
1512 Set the injected values of the parameters.
1513
1514 @param injection: A SimInspiralTable row object containing the injected parameters.
1515 """
1516 if injection is not None:
1517 self._injection=injection
1518 for name,onepos in self:
1519 new_injval=self._getinjpar(name)
1520 if new_injval is not None:
1521 self[name].set_injval(new_injval)
1522
1523 def set_triggers(self,triggers):
1524 """
1525 Set the trigger values of the parameters.
1526
1527 @param triggers: A list of SnglInspiral objects.
1528 """
1529 if triggers is not None:
1530 self._triggers=triggers
1531 for name,onepos in self:
1532 new_trigvals=self._gettrigpar(name)
1533 if new_trigvals is not None:
1534 self[name].set_trigvals(new_trigvals)
1535
1536
1537 def _getinjpar(self,paramname):
1538 """
1539 Map parameter names to parameters in a SimInspiralTable .
1540 """
1541 if self._injection is not None:
1542 for key,value in self._injXMLFuncMap.items():
1543 if paramname.lower().strip() == key.lower().strip():
1544 try:
1545 return self._injXMLFuncMap[key](self._injection)
1546 except TypeError:
1547 return self._injXMLFuncMap[key]
1548 return None
1549
1550 def _gettrigpar(self,paramname):
1551 """
1552 Map parameter names to parameters in a SnglInspiral.
1553 """
1554 vals = None
1555 if self._triggers is not None:
1556 for key,value in self._injXMLFuncMap.items():
1557 if paramname.lower().strip() == key.lower().strip():
1558 try:
1559 vals = dict([(trig.ifo,self._injXMLFuncMap[key](trig)) for trig in self._triggers])
1560 except TypeError:
1561 return self._injXMLFuncMap[key]
1562 except AttributeError:
1563 return None
1564 return vals
1565
1566 def __getitem__(self,key):
1567 """
1568 Container method . Returns posterior chain,one_d_pos, with name one_d_pos.name.
1569 """
1570 return self._posterior[key.lower()]
1571
1572 def __len__(self):
1573 """
1574 Container method. Defined as number of samples.
1575 """
1576 return len(self._logL)
1577
1578 def __iter__(self):
1579 """
1580 Container method. Returns iterator from self.forward for use in
1581 for (...) in (...) etc.
1582 """
1583 return self.forward()
1584
1585 def forward(self):
1586 """
1587 Generate a forward iterator (in sense of list of names) over Posterior
1588 with name,one_d_pos.
1589 """
1590 current_item = 0
1591 while current_item < self.dim:
1592 name=list(self._posterior.keys())[current_item]
1593 pos=self._posterior[name]
1594 current_item += 1
1595 yield name,pos
1596
    def bySample(self):
        """
        Generate a forward iterator over the list of samples corresponding to
        the data stored within the Posterior instance. These are returned as
        ParameterSamples instances.
        """
        current_item=0
        # NOTE(review): self.samples is read as an attribute here but *called*
        # in longest_chain_cycles(); confirm which form the current accessor
        # takes.
        pos_array,header=self.samples
        while current_item < len(self):
            # One row of the joint sample table, flattened to 1D.
            sample_array=(np.squeeze(pos_array[current_item,:]))
            # header is passed for both the names and header arguments;
            # presumably intentional — verify against PosteriorSample's ctor.
            yield PosteriorSample(sample_array, header, header)
            current_item += 1
1609
1610
1611 @property
1612 def dim(self):
1613 """
1614 Return number of parameters.
1615 """
1616 return len(self._posterior.keys())
1617
1618 @property
1619 def names(self):
1620 """
1621 Return list of parameter names.
1622 """
1623 nameslist=[]
1624 for key,value in self:
1625 nameslist.append(key)
1626 return nameslist
1627
1628 @property
1629 def means(self):
1630 """
1631 Return dict {paramName:paramMean} .
1632 """
1633 meansdict={}
1634 for name,pos in self:
1635 meansdict[name]=pos.mean
1636 return meansdict
1637
1638 @property
1639 def medians(self):
1640 """
1641 Return dict {paramName:paramMedian} .
1642 """
1643 mediansdict={}
1644 for name,pos in self:
1645 mediansdict[name]=pos.median
1646 return mediansdict
1647
1648 @property
1649 def stdevs(self):
1650 """
1651 Return dict {paramName:paramStandardDeviation} .
1652 """
1653 stdsdict={}
1654 for name,pos in self:
1655 stdsdict[name]=pos.stdev
1656 return stdsdict
1657
1658 @property
1659 def name(self):
1660 """
1661 Return qualified string containing the 'name' of the Posterior instance.
1662 """
1663 return self.__name
1664
1665 @property
1666 def description(self):
1667 """
1668 Return qualified string containing a 'description' of the Posterior instance.
1669 """
1670 return self.__description
1671
1672 def append(self,one_d_posterior):
1673 """
1674 Container method. Add a new OneDParameter to the Posterior instance.
1675 """
1676 self._posterior[one_d_posterior.name]=one_d_posterior
1677 return
1678
1679 def pop(self,param_name):
1680 """
1681 Container method. Remove PosteriorOneDPDF from the Posterior instance.
1682 """
1683 return self._posterior.pop(param_name)
1684
1685 def append_mapping(self, new_param_names, func, post_names):
1686 """
1687 Append posteriors pos1,pos2,...=func(post_names)
1688 """
1689 # deepcopy 1D posteriors to ensure mapping function doesn't modify the originals
1690 import copy
1691 #1D input
1692 if isinstance(post_names, str):
1693 old_post = copy.deepcopy(self[post_names])
1694 old_inj = old_post.injval
1695 old_trigs = old_post.trigvals
1696 if old_inj:
1697 new_inj = func(old_inj)
1698 else:
1699 new_inj = None
1700 if old_trigs:
1701 new_trigs = {}
1702 for IFO in old_trigs.keys():
1703 new_trigs[IFO] = func(old_trigs[IFO])
1704 else:
1705 new_trigs = None
1706
1707 samps = func(old_post.samples)
1708 new_post = PosteriorOneDPDF(new_param_names, samps, injected_value=new_inj, trigger_values=new_trigs)
1709 if new_post.samples.ndim == 0:
1710 print("WARNING: No posterior calculated for %s ..." % new_post.name)
1711 else:
1712 self.append(new_post)
1713 #MultiD input
1714 else:
1715 old_posts = [copy.deepcopy(self[post_name]) for post_name in post_names]
1716 old_injs = [post.injval for post in old_posts]
1717 old_trigs = [post.trigvals for post in old_posts]
1718 samps = func(*[post.samples for post in old_posts])
1719 #1D output
1720 if isinstance(new_param_names, str):
1721 if None not in old_injs:
1722 inj = func(*old_injs)
1723 else:
1724 inj = None
1725 if None not in old_trigs:
1726 new_trigs = {}
1727 for IFO in old_trigs[0].keys():
1728 oldvals = [param[IFO] for param in old_trigs]
1729 new_trigs[IFO] = func(*oldvals)
1730 else:
1731 new_trigs = None
1732 new_post = PosteriorOneDPDF(new_param_names, samps, injected_value=inj, trigger_values=new_trigs)
1733 self.append(new_post)
1734 #MultiD output
1735 else:
1736 if None not in old_injs:
1737 injs = func(*old_injs)
1738 else:
1739 injs = [None for name in new_param_names]
1740 if None not in old_trigs:
1741 new_trigs = [{} for param in range(len(new_param_names))]
1742 for IFO in old_trigs[0].keys():
1743 oldvals = [param[IFO] for param in old_trigs]
1744 newvals = func(*oldvals)
1745 for param,newval in enumerate(newvals):
1746 new_trigs[param][IFO] = newval
1747 else:
1748 new_trigs = [None for param in range(len(new_param_names))]
1749 if not samps: return() # Something went wrong
1750 new_posts = [PosteriorOneDPDF(new_param_name,samp,injected_value=inj,trigger_values=new_trigs) for (new_param_name,samp,inj,new_trigs) in zip(new_param_names,samps,injs,new_trigs)]
1751 for post in new_posts:
1752 if post.samples.ndim == 0:
1753 print("WARNING: No posterior calculated for %s ..." % post.name)
1754 else:
1755 self.append(post)
1756 return
1757
1758 def _average_posterior(self, samples, post_name):
1759 """
1760 Returns the average value of the 'post_name' column of the
1761 given samples.
1762 """
1763 ap = 0.0
1764 for samp in samples:
1765 ap = ap + samp[post_name]
1766 return ap / len(samples)
1767
1768 def _average_posterior_like_prior(self, samples, logl_name, prior_name, log_bias = 0):
1769 """
1770 Returns the average value of the posterior assuming that the
1771 'logl_name' column contains log(L) and the 'prior_name' column
1772 contains the prior (un-logged).
1773 """
1774 ap = 0.0
1775 for samp in samples:
1776 ap += np.exp(samp[logl_name]-log_bias)*samp[prior_name]
1777 return ap / len(samples)
1778
1779 def _bias_factor(self):
1780 """
1781 Returns a sensible bias factor for the evidence so that
1782 integrals are representable as doubles.
1783 """
1784 return np.mean(self._logL)
1785
1786 def di_evidence(self, boxing=64):
1787 """
1788 Returns the log of the direct-integration evidence for the
1789 posterior samples.
1790 """
1791 allowed_coord_names=["spin1", "spin2", "a1", "phi1", "theta1", "a2", "phi2", "theta2",
1792 "iota", "psi", "ra", "dec",
1793 "phi_orb", "phi0", "dist", "time", "mc", "mchirp", "chirpmass", "q"]
1794 samples,header=self.samples()
1795 header=header.split()
1796 coord_names=[name for name in allowed_coord_names if name in header]
1797 coordinatized_samples=[PosteriorSample(row, header, coord_names) for row in samples]
1798 tree=KDTree(coordinatized_samples)
1799
1800 if "prior" in header and "logl" in header:
1801 bf = self._bias_factor()
1802 return bf + np.log(tree.integrate(lambda samps: self._average_posterior_like_prior(samps, "logl", "prior", bf), boxing))
1803 elif "prior" in header and "likelihood" in header:
1804 bf = self._bias_factor()
1805 return bf + np.log(tree.integrate(lambda samps: self._average_posterior_like_prior(samps, "likelihood", "prior", bf), boxing))
1806 elif "post" in header:
1807 return np.log(tree.integrate(lambda samps: self._average_posterior(samps, "post"), boxing))
1808 elif "posterior" in header:
1809 return np.log(tree.integrate(lambda samps: self._average_posterior(samps, "posterior"), boxing))
1810 else:
1811 raise RuntimeError("could not find 'post', 'posterior', 'logl' and 'prior', or 'likelihood' and 'prior' columns in output to compute direct integration evidence")
1812
1814 """Returns an approximation to the log(evidence) obtained by
1815 fitting an ellipse around the highest-posterior samples and
1816 performing the harmonic mean approximation within the ellipse.
1817 Because the ellipse should be well-sampled, this provides a
1818 better approximation to the evidence than the full-domain HM."""
1819 allowed_coord_names=["spin1", "spin2", "a1", "phi1", "theta1", "a2", "phi2", "theta2",
1820 "iota", "psi", "ra", "dec",
1821 "phi_orb", "phi0", "dist", "time", "mc", "mchirp", "chirpmass", "q"]
1822 samples,header=self.samples()
1823 header=header.split()
1824
1825 n=int(0.05*samples.shape[0])
1826 if not n > 1:
1827 raise IndexError
1828
1829 coord_names=[name for name in allowed_coord_names if name in header]
1830 indexes=np.argsort(self._logL[:,0])
1831
1832 my_samples=samples[indexes[-n:], :] # The highest posterior samples.
1833 my_samples=np.array([PosteriorSample(sample,header,coord_names).coord() for sample in my_samples])
1834
1835 mu=np.mean(my_samples, axis=0)
1836 cov=np.cov(my_samples, rowvar=0)
1837
1838 d0=None
1839 for mysample in my_samples:
1840 d=np.dot(mysample-mu, np.linalg.solve(cov, mysample-mu))
1841 if d0 is None:
1842 d0 = d
1843 else:
1844 d0=max(d0,d)
1845
1846 ellipse_logl=[]
1847 ellipse_samples=[]
1848 for sample,logl in zip(samples, self._logL):
1849 coord=PosteriorSample(sample, header, coord_names).coord()
1850 d=np.dot(coord-mu, np.linalg.solve(cov, coord-mu))
1851
1852 if d <= d0:
1853 ellipse_logl.append(logl)
1854 ellipse_samples.append(sample)
1855
1856 if len(ellipse_samples) > 5*n:
1857 print('WARNING: ellpise evidence region encloses significantly more samples than %d'%n)
1858
1859 ellipse_samples=np.array(ellipse_samples)
1860 ellipse_logl=np.array(ellipse_logl)
1861
1862 ndim = len(coord_names)
1863 ellipse_volume=np.pi**(ndim/2.0)*d0**(ndim/2.0)/special.gamma(ndim/2.0+1)*np.sqrt(np.linalg.det(cov))
1864
1865 try:
1866 prior_index=header.index('prior')
1867 pmu=np.mean(ellipse_samples[:,prior_index])
1868 pstd=np.std(ellipse_samples[:,prior_index])
1869 if pstd/pmu > 1.0:
1870 print('WARNING: prior variation greater than 100\\% over elliptical volume.')
1871 approx_prior_integral=ellipse_volume*pmu
1872 except KeyError:
1873 # Maybe prior = 1?
1874 approx_prior_integral=ellipse_volume
1875
1876 ll_bias=np.mean(ellipse_logl)
1877 ellipse_logl = ellipse_logl - ll_bias
1878
1879 return np.log(approx_prior_integral) - np.log(np.mean(1.0/np.exp(ellipse_logl))) + ll_bias
1880
1881 def harmonic_mean_evidence(self):
1882 """
1883 Returns the log of the harmonic mean evidence for the set of
1884 posterior samples.
1885 """
1886 bf = self._bias_factor()
1887 return bf + np.log(1/np.mean(1/np.exp(self._logL-bf)))
1888
1889 def _posMaxL(self):
1890 """
1891 Find the sample with maximum likelihood probability. Returns value
1892 of likelihood and index of sample .
1893 """
1894 logl_vals=self._logL
1895 max_i=0
1896 max_logl=logl_vals[0]
1897 for i in range(len(logl_vals)):
1898 if logl_vals[i] > max_logl:
1899 max_logl=logl_vals[i]
1900 max_i=i
1901 return max_logl,max_i
1902
1903 def _posMap(self):
1904 """
1905 Find the sample with maximum a posteriori probability. Returns value
1906 of posterior and index of sample .
1907 """
1908 logl_vals=self._logL
1909 if self._logP is not None:
1910 logp_vals=self._logP
1911 else:
1912 return None
1913
1914 max_i=0
1915 max_pos=logl_vals[0]+logp_vals[0]
1916 for i in range(len(logl_vals)):
1917 if logl_vals[i]+logp_vals[i] > max_pos:
1918 max_pos=logl_vals[i]+logp_vals[i]
1919 max_i=i
1920 return max_pos,max_i
1921
1922 def _print_table_row(self,name,entries):
1923 """
1924 Print a html table row representation of
1925
1926 name:item1,item2,item3,...
1927 """
1928
1929 row_str='<tr><td>%s</td>'%name
1930 for entry in entries:
1931 row_str+='<td>%s</td>'%entry
1932 row_str+='</tr>'
1933 return row_str
1934
1935 @property
1936 def maxL(self):
1937 """
1938 Return the maximum likelihood probability and the corresponding
1939 set of parameters.
1940 """
1941 maxLvals={}
1942 max_logl,max_i=self._posMaxL()
1943 for param_name in self.names:
1944 maxLvals[param_name]=self._posterior[param_name].samples[max_i][0]
1945
1946 return (max_logl,maxLvals)
1947
1948 @property
1949 def maxP(self):
1950 """
1951 Return the maximum a posteriori probability and the corresponding
1952 set of parameters.
1953 """
1954 maxPvals={}
1955 max_pos,max_i=self._posMap()
1956 for param_name in self.names:
1957 maxPvals[param_name]=self._posterior[param_name].samples[max_i][0]
1958
1959 return (max_pos,maxPvals)
1960
1961
1962 def samples(self):
1963 """
1964 Return an (M,N) numpy.array of posterior samples; M = len(self);
1965 N = dim(self) .
1966 """
1967 header_string=''
1968 posterior_table=[]
1969 for param_name,one_pos in self:
1970 column=np.array(one_pos.samples)
1971 header_string+=param_name+'\t'
1972 posterior_table.append(column)
1973 posterior_table=tuple(posterior_table)
1974 return np.column_stack(posterior_table),header_string
1975
1976 def write_to_file(self,fname):
1977 """
1978 Dump the posterior table to a file in the 'common format'.
1979 """
1980 posterior_table, header_string = self.samples()
1981 np.savetxt(
1982 fname,
1983 posterior_table,
1984 comments='',
1985 delimiter='\t',
1986 header=header_string,
1987 )
1988
1989 def gelman_rubin(self, pname):
1990 """
1991 Returns an approximation to the Gelman-Rubin statistic (see
1992 Gelman, A. and Rubin, D. B., Statistical Science, Vol 7,
1993 No. 4, pp. 457--511 (1992)) for the parameter given, accurate
1994 as the number of samples in each chain goes to infinity. The
1995 posterior samples must have a column named 'chain' so that the
1996 different chains can be separated.
1997 """
1998 from numpy import seterr as np_seterr
1999 np_seterr(all='raise')
2000
2001 if "chain" in self.names:
2002 chains=np.unique(self["chain"].samples)
2003 chain_index=self.names.index("chain")
2004 param_index=self.names.index(pname)
2005 data,header=self.samples()
2006 chainData=[data[ data[:,chain_index] == chain, param_index] for chain in chains]
2007 allData=np.concatenate(chainData)
2008 chainMeans=[np.mean(data) for data in chainData]
2009 chainVars=[np.var(data) for data in chainData]
2010 BoverN=np.var(chainMeans)
2011 W=np.mean(chainVars)
2012 sigmaHat2=W + BoverN
2013 m=len(chainData)
2014 VHat=sigmaHat2 + BoverN/m
2015 try:
2016 R = VHat/W
2017 except:
2018 print("Error when computer Gelman-Rubin R statistic for %s. This may be a fixed parameter"%pname)
2019 R = np.nan
2020 return R
2021 else:
2022 raise RuntimeError('could not find necessary column header "chain" in posterior samples')
2023
2024 def healpix_map(self, resol, nest=True):
2025 """Returns a healpix map in the pixel ordering that represents the
2026 posterior density (per square degree) on the sky. The pixels
2027 will be chosen to have at least the given resolution (in
2028 degrees).
2029
2030 """
2031
2032 # Ensure that the resolution is twice the desired
2033 nside = 2
2034 while hp.nside2resol(nside, arcmin=True) > resol*60.0/2.0:
2035 nside *= 2
2036
2037 ras = self['ra'].samples.squeeze()
2038 decs = self['dec'].samples.squeeze()
2039
2040 phis = ras
2041 thetas = np.pi/2.0 - decs
2042
2043 # Create the map in ring ordering
2044 inds = hp.ang2pix(nside, thetas, phis, nest=False)
2045 counts = np.bincount(inds)
2046 if counts.shape[0] < hp.nside2npix(nside):
2047 counts = np.concatenate((counts, np.zeros(hp.nside2npix(nside) - counts.shape[0])))
2048
2049 # Smooth the map a bit (Gaussian sigma = resol)
2050 hpmap = hp.sphtfunc.smoothing(counts, sigma=resol*np.pi/180.0)
2051
2052 hpmap = hpmap / (np.sum(hpmap)*hp.nside2pixarea(nside, degrees=True))
2053
2054 if nest:
2055 hpmap = hp.reorder(hpmap, r2n=True)
2056
2057 return hpmap
2058
    def __str__(self):
        """
        Define a string representation of the Posterior class ; returns
        a html formatted table of various properties of posteriors.
        """
        # Header row: fixed statistics columns plus one trigger-value
        # column per IFO when triggers are present.
        return_val='<table border="1" id="statstable"><tr><th/>'
        column_names=['maP','maxL','stdev','mean','median','stacc','injection value']
        IFOs = []
        if self._triggers is not None:
            IFOs = [trig.ifo for trig in self._triggers]
            for IFO in IFOs:
                column_names.append(IFO+' trigger values')

        for column_name in column_names:
            return_val+='<th>%s</th>'%column_name

        return_val+='</tr>'

        # One row per parameter, with statistics read off the
        # one-dimensional posterior object.
        for name,oned_pos in self:

            max_logl,max_i=self._posMaxL()
            maxL=oned_pos.samples[max_i][0]
            max_post,max_j=self._posMap()
            maP=oned_pos.samples[max_j][0]
            mean=str(oned_pos.mean)
            stdev=str(oned_pos.stdev)
            median=str(np.squeeze(oned_pos.median))
            stacc=str(oned_pos.stacc)
            injval=str(oned_pos.injval)
            trigvals=oned_pos.trigvals

            row = [maP,maxL,stdev,mean,median,stacc,injval]
            if self._triggers is not None:
                for IFO in IFOs:
                    try:
                        row.append(str(trigvals[IFO]))
                    except TypeError:
                        # trigvals is None when this parameter has no
                        # per-IFO trigger value.
                        row.append(None)
            return_val+=self._print_table_row(name,row)

        return_val+='</table>'

        # Round-trip through an XML parser purely to pretty-print the
        # table.  NOTE(review): XMLParser and tostring come from an import
        # elsewhere in this module (presumably xml.etree) -- verify
        # against the file header.
        parser=XMLParser()
        parser.feed(return_val)
        Estr=parser.close()

        elem=Estr
        rough_string = tostring(elem, 'utf-8')
        reparsed = minidom.parseString(rough_string)
        return_val=reparsed.toprettyxml(indent="  ")
        # Strip the XML declaration line that toprettyxml prepends.
        return return_val[len('<?xml version="1.0" ?>')+1:]
2110
2111
2112 #===============================================================================
2113 # Functions used to parse injection structure.
2114 #===============================================================================
2115 def _inj_m1(self,inj):
2116 """
2117 Return the mapping of (mchirp,eta)->m1; m1>m2 i.e. return the greater of the mass
2118 components (m1) calculated from the chirp mass and the symmetric mass ratio.
2119
2120 @param inj: a custom type with the attributes 'mchirp' and 'eta'.
2121 """
2122 (mass1,mass2)=mc2ms(inj.mchirp,inj.eta)
2123 return mass1
2124
2125 def _inj_m2(self,inj):
2126 """
2127 Return the mapping of (mchirp,eta)->m2; m1>m2 i.e. return the lesser of the mass
2128 components (m2) calculated from the chirp mass and the symmetric mass ratio.
2129
2130 @param inj: a custom type with the attributes 'mchirp' and 'eta'.
2131 """
2132 (mass1,mass2)=mc2ms(inj.mchirp,inj.eta)
2133 return mass2
2134
2135 def _inj_q(self,inj):
2136 """
2137 Return the mapping of (mchirp,eta)->q; m1>m2 i.e. return the mass ratio q=m2/m1.
2138
2139 @param inj: a custom type with the attributes 'mchirp' and 'eta'.
2140 """
2141 (mass1,mass2)=mc2ms(inj.mchirp,inj.eta)
2142 return mass2/mass1
2143
2144 def _inj_longitude(self,inj):
2145 """
2146 Return the mapping of longitude found in inj to the interval [0,2*pi).
2147
2148 @param inj: a custom type with the attribute 'longitude'.
2149 """
2150 if inj.longitude>2*pi_constant or inj.longitude<0.0:
2151 maplong=2*pi_constant*(((float(inj.longitude))/(2*pi_constant)) - floor(((float(inj.longitude))/(2*pi_constant))))
2152 print("Warning: Injected longitude/ra (%s) is not within [0,2\\pi)! Angles are assumed to be in radians so this will be mapped to [0,2\\pi). Mapped value is: %s."%(str(inj.longitude),str(maplong)))
2153 return maplong
2154 else:
2155 return inj.longitude
2156
    def _inj_spins(self, inj, frame='OrbitalL'):
        """
        Extract a dictionary of injected spin quantities from inj.

        Returns spherical spin components ('a1','theta1','phi1', 'a2',
        'theta2','phi2'), the inclination 'iota', aligned-spin values
        ('a1z','a2z') when the in-plane components vanish, the
        precession-cone angle 'beta', the effective spin 'spinchi', and --
        when frame is 'OrbitalL' -- the system-frame angles ('theta_jn',
        'phi12','tilt1','tilt2','phi_jl') from lalsimulation.  Returns an
        empty dict when inj is empty/None.

        @param inj: injection table row with cartesian spin components.
        @param frame: frame the XML cartesian spins are assumed to be in.
        """

        from lalsimulation import SimInspiralTransformPrecessingWvf2PE

        spins = {}
        f_ref = self._injFref

        if not inj:
            spins = {}

        else:
            axis = lalsim.SimInspiralGetFrameAxisFromString(frame)
            s1x=inj.spin1x
            s1y=inj.spin1y
            s1z=inj.spin1z
            s2x=inj.spin2x
            s2y=inj.spin2y
            s2z=inj.spin2z
            iota=inj.inclination
            m1, m2 = inj.mass1, inj.mass2
            mc, eta = inj.mchirp, inj.eta

            # Component spins converted to spherical coordinates.
            a1, theta1, phi1 = cart2sph(s1x, s1y, s1z)
            a2, theta2, phi2 = cart2sph(s2x, s2y, s2z)

            spins = {'a1':a1, 'theta1':theta1, 'phi1':phi1,
                     'a2':a2, 'theta2':theta2, 'phi2':phi2,
                     'iota':iota}
            # If spins are aligned, save the sign of the z-component
            if inj.spin1x == inj.spin1y == inj.spin2x == inj.spin2y == 0.:
                spins['a1z'] = inj.spin1z
                spins['a2z'] = inj.spin2z

            # Build L, S1, S2 to form the total angular momentum J and the
            # opening angle beta between J and L.
            L  = orbital_momentum(f_ref, m1,m2, iota)
            S1 = np.hstack((s1x, s1y, s1z))
            S2 = np.hstack((s2x, s2y, s2z))

            zhat = np.array([0., 0., 1.])
            aligned_comp_spin1 = array_dot(S1, zhat)
            aligned_comp_spin2 = array_dot(S2, zhat)
            # Effective aligned-spin combination, stored as 'spinchi'.
            chi = aligned_comp_spin1 + aligned_comp_spin2 + \
                  np.sqrt(1. - 4.*eta) * (aligned_comp_spin1 - aligned_comp_spin2)
            S1 *= m1**2
            S2 *= m2**2
            J = L + S1 + S2

            beta = array_ang_sep(J, L)
            spins['beta'] = beta
            spins['spinchi'] = chi
            # Huge caveat: SimInspiralTransformPrecessingWvf2PE assumes that the cartesian spins in the XML table are given in the L frame, ie. in a frame where L||z. While this is the default in inspinj these days, other possibilities exist.
            # Unfortunately, we don't have a function (AFIK), that transforms spins from an arbitrary frame to an arbitrary frame, otherwise I'd have called it here to be sure we convert in the L frame.
            # FIXME: add that function here if it ever gets written. For the moment just check
            if not frame=='OrbitalL':
                print("I cannot calculate the injected values of the spin angles unless frame is OrbitalL. Skipping...")
                return spins
            # m1 and m2 here are NOT in SI, but in Msun, this is not a typo.
            theta_jn,phi_jl,tilt1,tilt2,phi12,chi1,chi2=SimInspiralTransformPrecessingWvf2PE(inj.inclination,inj.spin1x, inj.spin1y, inj.spin1z,inj.spin2x, inj.spin2y, inj.spin2z, m1, m2, f_ref, inj.coa_phase)
            spins['theta_jn']=theta_jn
            spins['phi12']=phi12
            spins['tilt1']=tilt1
            spins['tilt2']=tilt2
            spins['phi_jl']=phi_jl

            """
            #If everything is all right, this function should give back the cartesian spins. Uncomment to check
            print("Inverting ")
            iota_back,a1x_back,a1y_back,a1z_back,a2x_back,a2y_back,a2z_back = \
                lalsim.SimInspiralTransformPrecessingNewInitialConditions(theta_jn,phi_jl,tilt1,tilt2,phi12,chi1,chi2,m1*lal.MSUN_SI,m2*lal.MSUN_SI,f_ref,inj.coa_phase)
            print(a1x_back,a1y_back,a1z_back)
            print(a2x_back,a2y_back,a2z_back)
            print(iota_back)
            """

        return spins
2231
2233 """
2234 Data structure for a table of posterior samples .
2235 """
2236 def __init__(self,commonResultsFormatData,SimBurstTableEntry=None,injFref=None,SnglBurstList=None,name=None,description=None):
2237 """
2238 Constructor.
2239
2240 @param commonResultsFormatData: A 2D array containing the posterior
2241 samples and related data. The samples chains form the columns.
2242 @param SimBurstTableEntry: A ligolw.lscstables.SimBurst row containing the injected values.
2243 @param SnglBurstList: A list of SnglBurst objects containing the triggers.
2244 @param injFref: reference frequency in injection
2245 @param name: optional name for this Posterior
2246 @param description: optional description for this Posterior
2247 """
2248 common_output_table_header,common_output_table_raw =commonResultsFormatData
2249 self._posterior_posterior={}
2250 self._injFref_injFref=injFref
2251 self._injection_injection=SimBurstTableEntry
2252 self._triggers_triggers=SnglBurstList
2253 self._loglaliases_loglaliases=['posterior', 'logl','logL','likelihood', 'deltalogl']
2254 self._logpaliases_logpaliases=['logp', 'logP','prior','logprior','Prior','logPrior']
2255
2256 common_output_table_header=[i.lower() for i in common_output_table_header]
2257
2258 # Define XML mapping
2260 'f0':lambda inj:inj.frequency,
2261 'frequency':lambda inj:inj.frequency,
2262 'centre_frequency':lambda inj:inj.frequency,
2263 'quality':lambda inj:inj.q,
2264 'hrss':lambda inj:inj.hrss,
2265 'loghrss':lambda inj:np.log(inj.hrss),
2266 'polar_angle':lambda inj:inj.pol_ellipse_angle,
2267 'pol_ellipse_angle':lambda inj:inj.pol_ellipse_angle,
2268 'pol_ellipse_e':lambda inj:inj.pol_ellipse_e,
2269 'alpha':lambda inj:inj.pol_ellipse_angle,
2270 'polar_eccentricity':lambda inj:inj.pol_ellipse_e,
2271 'eccentricity':lambda inj:inj.pol_ellipse_e,
2272 'time': lambda inj:float(get_end(inj)),
2273 'end_time': lambda inj:float(get_end(inj)),
2275 'rightascension':self._inj_longitude_inj_longitude,
2276 'long':self._inj_longitude_inj_longitude,
2277 'longitude':self._inj_longitude_inj_longitude,
2278 'dec':lambda inj:inj.dec,
2279 'declination':lambda inj:inj.dec,
2280 'lat':lambda inj:inj.dec,
2281 'latitude':lambda inj:inj.dec,
2282 'psi': lambda inj: np.mod(inj.psi, np.pi),
2283 'f_ref': lambda inj: self._injFref_injFref,
2284 'polarisation':lambda inj:inj.psi,
2285 'polarization':lambda inj:inj.psi,
2286 'duration':lambda inj:inj.duration,
2287 'h1_end_time':lambda inj:det_end_time('H', inj),
2288 'l1_end_time':lambda inj:det_end_time('L', inj),
2289 'v1_end_time':lambda inj:det_end_time('V', inj),
2290 }
2291
2292 for one_d_posterior_samples,param_name in zip(np.hsplit(common_output_table_raw,common_output_table_raw.shape[1]),common_output_table_header):
2293
2294 self._posterior_posterior[param_name]=PosteriorOneDPDF(param_name.lower(),one_d_posterior_samples,injected_value=self._getinjpar(param_name),injFref=self._injFref_injFref,trigger_values=self._gettrigpar(param_name))
2295
2296 logLFound=False
2297
2298 for loglalias in self._loglaliases_loglaliases:
2299 if loglalias in common_output_table_header:
2300 try:
2301 self._logL_logL=self._posterior_posterior[loglalias].samples
2302 except KeyError:
2303 print("No '%s' column in input table!"%loglalias)
2304 continue
2305 logLFound=True
2306
2307 if not logLFound:
2308 raise RuntimeError("No likelihood/posterior values found!")
2309
2310 self._logP_logP=None
2311 for logpalias in self._logpaliases_logpaliases:
2312 if logpalias in common_output_table_header:
2313 try:
2314 self._logP_logP=self._posterior_posterior[logpalias].samples
2315 except KeyError:
2316 print("No '%s' column in input table!"%logpalias)
2317 continue
2318 if not 'log' in logpalias:
2319 self._logP_logP=[np.log(i) for i in self._logP_logP]
2320 if name is not None:
2321 self.__name__name=name
2322
2323 if description is not None:
2324 self.__description__description=description
2325
2326 return
2327 #===============================================================================
2328 # Functions used to parse injection structure.
2329 #===============================================================================
2330
2331 def _inj_longitude(self,inj):
2332 """
2333 Return the mapping of longitude found in inj to the interval [0,2*pi).
2334
2335 @param inj: a custom type with the attribute 'longitude'.
2336 """
2337 if inj.ra>2*pi_constant or inj.ra<0.0:
2338 maplong=2*pi_constant*(((float(inj.ra)/(2*pi_constant)) - floor(((float(inj.ra))/(2*pi_constant)))))
2339 print("Warning: Injected longitude/ra (%s) is not within [0,2\\pi)! Angles are assumed to be in radians so this will be mapped to [0,2\\pi). Mapped value is: %s."%(str(inj.ra),str(maplong)))
2340 return maplong
2341 else:
2342 return inj.ra
2343
class KDTree(object):
    """
    A kD-tree over a set of objects, each exposing its coordinates via an
    obj.coord() method.  Used to decompose parameter space for the
    direct-integration evidence computation.
    """
    def __init__(self, objects):
        """
        Construct a kD-tree from a sequence of objects. Each object
        should return its coordinates using obj.coord().

        @param objects: non-empty sequence of coordinate-bearing objects.
        @raise RuntimeError: if the sequence is empty.
        """
        if len(objects) == 0:
            raise RuntimeError("cannot construct kD-tree out of zero objects---you may have a repeated sample in your list")
        elif len(objects) == 1:
            # Leaf node: bounding box degenerates to the single point.
            self._objects = objects[:]
            coord=self._objects[0].coord()
            self._bounds = coord,coord
        elif self._same_coords(objects):
            # All the same coordinates: collapse to a one-object leaf.
            self._objects = [ objects[0] ]
            coord=self._objects[0].coord()
            self._bounds = coord,coord
        else:
            # Interior node: split at the median along the longest
            # dimension of the bounding box.  (The original code also did
            # a dead 'low,high=self._bounds' unpack here; removed.)
            self._objects = objects[:]
            self._bounds = self._bounds_of_objects()
            self._split_dim=self._longest_dimension()
            longest_dim = self._split_dim
            sorted_objects=sorted(self._objects, key=lambda obj: (obj.coord())[longest_dim])
            N = len(sorted_objects)
            bound=0.5*(sorted_objects[int(N/2)].coord()[longest_dim] + sorted_objects[int(N/2)-1].coord()[longest_dim])
            low = [obj for obj in self._objects if obj.coord()[longest_dim] < bound]
            high = [obj for obj in self._objects if obj.coord()[longest_dim] >= bound]
            if len(low)==0:
                # Then there must be multiple values with the same
                # coordinate as the minimum element of high; re-split so
                # both children are non-empty and the recursion terminates.
                low = [obj for obj in self._objects if obj.coord()[longest_dim]==bound]
                high = [obj for obj in self._objects if obj.coord()[longest_dim] > bound]
            self._left = KDTree(low)
            self._right = KDTree(high)

    def _same_coords(self, objects):
        """
        True if and only if all the given objects have the same
        coordinates.
        """
        if len(objects) <= 1:
            return True
        coords = [obj.coord() for obj in objects]
        c0 = coords[0]
        for ci in coords[1:]:
            if not np.all(ci == c0):
                return False
        return True

    def _bounds_of_objects(self):
        """
        Return (low, high) coordinate bounds of the contained objects.
        """
        low=self._objects[0].coord()
        high=self._objects[0].coord()
        for obj in self._objects[1:]:
            low=np.minimum(low,obj.coord())
            high=np.maximum(high,obj.coord())
        return low,high

    def _longest_dimension(self):
        """
        Return the index of the longest dimension of the tree bounds.
        """
        low,high = self._bounds
        return np.argmax(high - low)

    def objects(self):
        """
        Return (a shallow copy of) the objects in the tree.
        """
        return self._objects[:]

    def __iter__(self):
        """
        Iterator over all the objects contained in the tree.
        """
        return self._objects.__iter__()

    def left(self):
        """
        Return the left sub-tree.
        """
        return self._left

    def right(self):
        """
        Return the right sub-tree.
        """
        return self._right

    def split_dim(self):
        """
        Return the dimension along which this level of the kD-tree
        splits.
        """
        return self._split_dim

    def bounds(self):
        """
        Return the coordinates of the lower-left and upper-right
        corners of the bounding box for this tree: low_left, up_right.
        """
        return self._bounds

    def volume(self):
        """
        Return the volume of the bounding box of the tree.
        """
        v = 1.0
        low,high=self._bounds
        for l,h in zip(low,high):
            v = v*(h - l)
        return v

    def integrate(self,f,boxing=64):
        """
        Return the integral of f(objects) over the tree. The optional
        boxing parameter determines how deep to descend into the tree
        before computing f.
        """
        def leaf_integral(tree):
            # Approximate the integral over a box as volume * f(objects).
            return tree.volume()*f(tree._objects)

        def combine(a,b):
            return a+b

        return self.operate(leaf_integral,combine,boxing=boxing)

    def operate(self,f,g,boxing=64):
        """
        Apply f to nodes holding at most ``boxing`` objects, combining
        child results with g while descending the tree.
        """
        if len(self._objects) <= boxing:
            return f(self)
        else:
            return g(self._left.operate(f,g,boxing),self._right.operate(f,g,boxing))
2492
2493
2494class KDTreeVolume(object):
2495 """
2496 A kD-tree suitable for splitting parameter spaces and counting hypervolumes.
2497 Is modified from the KDTree class so that bounding boxes are stored. This means that
2498 there are no longer gaps in the hypervolume once the samples have been split into groups.
2499 """
2500 def __init__(self, objects,boundingbox,dims=0):
2501 """
2502 Construct a kD-tree from a sequence of objects. Each object
2503 should return its coordinates using obj.coord().
2504 the obj should also store the bounds of the hypervolume its found in.
2505 for non-leaf objects we need the name of the dimension split and value at split.
2506 """
2507 self._dimension = dims
2508 self._bounds = boundingbox
2509 self._weight = 1
2510 if len(objects) == 0: #for no objects - something is wrong, i think it can only happen in first call
2511 raise RuntimeError("cannot construct kD-tree out of zero objects---you may have a repeated sample in your list")
2512 elif len(objects) == 1: #1 object, have reached leaf of tree
2513 self._objects = objects[:]
2514 elif self._same_coords(objects): # When ALL samples have the same coordinates in all dimensions
2515 self._weight = len(objects)
2516 self._objects = [ objects[0] ] #need to modify kdtree_bin functions to use _weight to get correct number of samples
2517 coord=self._objects[0].coord()
2518 else: #construct next level of tree with multiple samples
2519 self._objects = objects[:]
2520 split_dim = self._dimension
2521 sorted_objects=sorted(self._objects, key=lambda obj: (obj.coord())[split_dim])
2522 N = len(sorted_objects)
2523 self._split_value = 0.5*(sorted_objects[int(N/2)].coord()[split_dim] + sorted_objects[int(N/2)-1].coord()[split_dim])
2524 bound = self._split_value
2525 low = [obj for obj in self._objects if obj.coord()[split_dim] < bound]
2526 high = [obj for obj in self._objects if obj.coord()[split_dim] >= bound]
2527 if len(low)==0:
2528 # Then there must be multiple values with the same
2529 # coordinate as the minimum element of 'high'
2530 low = [obj for obj in self._objects if obj.coord()[split_dim] == bound]
2531 high = [obj for obj in self._objects if obj.coord()[split_dim] > bound]
2532 leftBoundingbox = []
2533 rightBoundingbox = []
2534 for i in self._bounds:
2535 leftBoundingbox.append(list(i))
2536 rightBoundingbox.append(list(i))
2537 leftBoundingbox[1][split_dim] = bound
2538 rightBoundingbox[0][split_dim] = bound
2539 # designate the next dimension to use for split for sub-trees
2540 # if has got to the end of the list of dimensions then starts
2541 # again at dimension = 0
2542 if (split_dim < (len(self._objects[0].coord()) - 1)):
2543 child_dim = split_dim + 1
2544 else:
2545 child_dim = 0
2546 self._left = KDTreeVolume(low,leftBoundingbox,dims = child_dim)
2547 # added in a load of messing about incase there are repeated values in the currently checked dimension
2548 if (len(high) != 0):
2549 self._right = KDTreeVolume(high,rightBoundingbox,dims = child_dim)
2550 else:
2551 self._right = None
2552
2553 def _same_coords(self, objects):
2554 """
2555 True if and only if all the given objects have the same
2556 coordinates.
2557 """
2558 if len(objects) <= 1:
2559 return True
2560 coords = [obj.coord() for obj in objects]
2561 c0 = coords[0]
2562 for ci in coords[1:]:
2563 if not np.all(ci == c0):
2564 return False
2565 return True
2566
2567 def objects(self):
2568 """
2569 Returns the objects in the tree.
2570 """
2571 return self._objects[:]
2572
2573 def __iter__(self):
2574 """
2575 Iterator over all the objects contained in the tree.
2576 """
2577 return self._objects.__iter__()
2578
2579 def left(self):
2580 """
2581 Returns the left tree.
2582 """
2583 return self._left
2584
2585 def right(self):
2586 """
2587 Returns the right tree.
2588 """
2589 return self._right
2590
2591 def split_dim(self):
2592 """
2593 Returns the dimension along which this level of the kD-tree
2594 splits.
2595 """
2596 return self._split_dim
2597
2598 def bounds(self):
2599 """
2600 Returns the coordinates of the lower-left and upper-right
2601 corners of the bounding box for this tree: low_left, up_right
2602 """
2603 return self._bounds
2604
2605 def volume(self):
2606 """
2607 Returns the volume of the bounding box of the tree.
2608 """
2609 v = 1.0
2610 low,high=self._bounds
2611 for l,h in zip(low,high):
2612 v = v*(h - l)
2613 return v
2614
2615 def integrate(self,f,boxing=64):
2616 """
2617 Returns the integral of f(objects) over the tree. The
2618 optional boxing parameter determines how deep to descend into
2619 the tree before computing f.
2620 """
2621 def x(tree):
2622 return tree.volume()*f(tree._objects)
2623
2624 def y(a,b):
2625 return a+b
2626
2627 return self.operate(x,y,boxing=boxing)
2628
2629 def operate(self,f,g,boxing=64):
2630 """
2631 Operates on tree nodes exceeding boxing parameter depth.
2632 """
2633 if len(self._objects) <= boxing:
2634 return f(self)
2635 else:
2636 return g(self._left.operate(f,g,boxing),self._right.operate(f,g,boxing))
2637
2638 def search(self,coordinates,boxing = 64):
2639 """
2640 takes a set of coordinates and searches down through the tree untill it gets
2641 to a box with less than 'boxing' objects in it and returns the box bounds,
2642 number of objects in the box, and the weighting.
2643 """
2644 if len(self._objects) <= boxing:
2645 return self._bounds,len(self._objects),self._weight
2646 elif coordinates[self._dimension] < self._split_value:
2647 return self._left.search(coordinates,boxing)
2648 else:
2649 return self._right.search(coordinates,boxing)
2650
2651 def fillNewTree(self,boxing = 64, isArea = False):
2652 """
2653 copies tree structure, but with KDSkeleton as the new nodes.
2654 """
2655 boxN = boxing
2656 if len(self._objects) <= boxN:
2657 newNode = KDSkeleton(self.bounds(), left_child = None , right_child = None)
2658 if isArea:
2659 newNode.setImportance(len(self._objects),skyArea(self.bounds()))
2660 else:
2661 newNode.setImportance(len(self._objects),self.volume())
2662 return newNode
2663 else:
2664 if isArea:
2665 newNode = KDSkeleton(self.bounds, left_child = self._left.fillNewTree(boxN,isArea=True), right_child = self._right.fillNewTree(boxN,isArea=True))
2666 newNode.setImportance(len(self._objects),skyArea(self.bounds()))
2667 else:
2668 newNode = KDSkeleton(self.bounds, left_child = self._left.fillNewTree(boxN), right_child = self._right.fillNewTree(boxN))
2669 newNode.setImportance(len(self._objects),self.volume())
2670 newNode.setSplit(self._dimension,self._split_value)
2671 return newNode
2672
class KDSkeleton(object):
    """
    Lightweight node type storing the structure of a kd tree: bounding
    box, child links, split information and per-node statistics.
    """

    def __init__(self, bounding_box, left_child = None, right_child = None):
        self._bounds = bounding_box
        self._left = left_child
        self._right = right_child
        self._samples = 0          # number of samples routed to this node
        self._splitValue = None    # coordinate value at which this node splits
        self._splitDim = None      # dimension index used for the split
        self._importance = None    # samples per unit volume (or sky area)
        self._volume = None        # volume (or sky area) of the node

    def addSample(self):
        """Record one more sample landing in this node."""
        self._samples = self._samples + 1

    def bounds(self):
        """Return the node's bounding box."""
        return self._bounds

    def search(self,coordinates):
        """
        Follow the split decisions down to a leaf and return its
        bounds, sample count and importance.
        """
        node = self
        while node._left is not None:
            if coordinates[node._splitDim] < node._splitValue:
                node = node._left
            else:
                node = node._right
        return node._bounds, node._samples, node._importance

    def setImportance(self, sampleNumber, volume):
        """Store importance = sample count per unit volume (or area)."""
        self._importance = sampleNumber/volume
        self._volume = volume

    def setSplit(self,dimension,value):
        """Record the split dimension and the value at the split."""
        self._splitDim = dimension
        self._splitValue = value
2715
2716
class PosteriorSample(object):
    """
    A single parameter sample object, suitable for inclusion in a
    kD-tree.
    """

    def __init__(self, sample_array, headers, coord_names):
        """
        Given the sample array, headers for the values, and the names
        of the desired coordinates, construct a parameter sample
        object.
        """
        self._samples=sample_array[:]
        self._headers=headers
        # a sample must supply exactly one value per header entry
        if len(sample_array) != len(self._headers):
            print("Header length = ", len(self._headers))
            print("Sample length = ", len(sample_array))
            raise RuntimeError("parameter and sample lengths do not agree")
        self._coord_names=coord_names
        self._coord_indexes=[self._headers.index(name) for name in coord_names]

    def __getitem__(self, key):
        """
        Return the element with the corresponding name.
        """
        name = key.lower()
        if name not in self._headers:
            raise KeyError("key not found in posterior sample: %s"%name)
        return self._samples[self._headers.index(name)]

    def coord(self):
        """
        Return the coordinates for the parameter sample.
        """
        return self._samples[self._coord_indexes]
2754
2755
2756
2757
2758
2759class AnalyticLikelihood(object):
2760 """
2761 Return analytic likelihood values.
2762 """
2763
2764 def __init__(self, covariance_matrix_files, mean_vector_files):
2765 """
2766 Prepare analytic likelihood for the given parameters.
2767 """
2768 # Make sure files names are in a list
2769 if isinstance(covariance_matrix_files, str):
2770 covariance_matrix_files = [covariance_matrix_files]
2771 if isinstance(mean_vector_files, str):
2772 mean_vector_files = [mean_vector_files]
2773
2774 covarianceMatrices = [np.loadtxt(csvFile, delimiter=',') for csvFile in covariance_matrix_files]
2775 num_matrices = len(covarianceMatrices)
2776
2777 if num_matrices != len(mean_vector_files):
2778 raise RuntimeError('Must give a mean vector list for every covariance matrix')
2779
2780 param_line = open(mean_vector_files[0]).readline()
2781 self._params = [param.strip() for param in param_line.split(',')]
2782
2783 converter=lambda x: eval(x.replace('pi','%.32f'%pi_constant)) # converts fractions w/ pi (e.g. 3.0*pi/2.0)
2784 self._modes = []
2785 for i in range(num_matrices):
2786 CM = covarianceMatrices[i]
2787 vecFile = mean_vector_files[i]
2788
2789 param_line = open(vecFile).readline()
2790 params = [param.strip() for param in param_line.split(',')]
2791 if set(params)!=set(self._params):
2792 raise RuntimeError('Parameters do not agree between mean vector files.')
2793
2794 sigmas = dict(zip(params,np.sqrt(CM.diagonal())))
2795 colNums = range(len(params))
2796 converters = dict(zip(colNums,[converter for i in colNums]))
2797 meanVectors = np.loadtxt(vecFile, delimiter=',', skiprows=1, converters=converters)
2798 try:
2799 for vec in meanVectors:
2800 means = dict(zip(params,vec))
2801 mode = [(param, stats.norm(loc=means[param],scale=sigmas[param])) for param in params]
2802 self._modes.append(dict(mode))
2803 except TypeError:
2804 means = dict(zip(params,meanVectors))
2805 mode = [(param, stats.norm(loc=means[param],scale=sigmas[param])) for param in params]
2806 self._modes.append(dict(mode))
2807 self._num_modes = len(self._modes)
2808
2809 def pdf(self, param):
2810 """
2811 Return PDF function for parameter.
2812 """
2813 pdf = None
2814 if param in self._params:
2815 pdf = lambda x: (1.0/self._num_modes) * sum([mode[param].pdf(x) for mode in self._modes])
2816 return pdf
2817
2818 def cdf(self, param):
2819 """
2820 Return PDF function for parameter.
2821 """
2822 cdf = None
2823 if param in self._params:
2824 cdf = lambda x: (1.0/self._num_modes) * sum([mode[param].cdf(x) for mode in self._modes])
2825 return cdf
2826
2827 @property
2828 def names(self):
2829 """
2830 Return list of parameter names described by analytic likelihood function.
2831 """
2832 return self._params
2833
2834
2835
2836#===============================================================================
2837# Web page creation classes (wrap ElementTrees)
2838#===============================================================================
2839
class htmlChunk(object):
    """
    A base class for representing web content using ElementTree .
    """
    def __init__(self,tag,attrib=None,parent=None):
        """
        Create an element named ``tag``; copy attributes from the
        optional ``attrib`` mapping and append to ``parent`` if given.
        """
        self._html=Element(tag)
        if attrib:
            for attribname,attribvalue in attrib.items():
                self._html.attrib[attribname]=attribvalue
        if parent:
            parent.append(self._html)

    def toprettyxml(self):
        """
        Return a pretty-printed XML string of this chunk's element tree.
        """
        elem=self._html
        rough_string = tostring(elem)
        reparsed = minidom.parseString(rough_string)
        return reparsed.toprettyxml(indent="  ")

    def __str__(self):
        return self.toprettyxml()

    def write(self,string):
        """Parse an XML fragment and append the resulting element."""
        parser=XMLParser()
        parser.feed(string)
        Estr=parser.close()
        self._html.append(Estr)

    def p(self,pstring):
        """Append and return a paragraph element with the given text."""
        Ep=Element('p')
        Ep.text=pstring
        self._html.append(Ep)
        return Ep

    def h1(self,h1string):
        """Append and return an h1 heading with the given text."""
        Ep=Element('h1')
        Ep.text=h1string
        self._html.append(Ep)
        return Ep

    def h5(self,h1string):
        """Append and return an h5 heading with the given text."""
        Ep=Element('h5')
        Ep.text=h1string
        self._html.append(Ep)
        return Ep

    def h2(self,h2string):
        """Append and return an h2 heading with the given text."""
        Ep=Element('h2')
        Ep.text=h2string
        self._html.append(Ep)
        return Ep

    def h3(self,h1string):
        """Append and return an h3 heading with the given text."""
        Ep=Element('h3')
        Ep.text=h1string
        self._html.append(Ep)
        return Ep

    def br(self):
        """Append and return a line-break element."""
        Ebr=Element('br')
        self._html.append(Ebr)
        return Ebr

    def hr(self):
        """Append and return a horizontal-rule element."""
        Ehr=Element('hr')
        self._html.append(Ehr)
        return Ehr

    def a(self,url,linktext):
        """Append and return an anchor linking to url with linktext."""
        Ea=Element('a')
        Ea.attrib['href']=url
        Ea.text=linktext
        self._html.append(Ea)
        return Ea

    def tab(self,idtable=None):
        """Append and return a table element, optionally with an id."""
        args={}
        if idtable is not None:
            args={'id':idtable}

        Etab=Element('table',args)
        self._html.append(Etab)
        return Etab

    def insert_row(self,tab,label=None):
        """
        Insert row in table tab.
        If given, label used as id for the table tag
        """
        Etr=Element('tr')
        if label is not None:
            Etr.attrib['id']=label
        tab.append(Etr)
        return Etr

    def insert_td(self,row,td,label=None,legend=None):
        """
        Insert cell td into row row.
        Sets id to label, if given
        """
        Etd=Element('td')

        # Idiom fix: isinstance instead of ``type(td) is str``
        if isinstance(td, str):
            Etd.text=td
        else:
            # pretty-print non-string content before embedding it as text
            td=tostring(td)
            td=minidom.parseString(td)
            td=td.toprettyxml(indent="  ")
            Etd.text=td
        if label is not None:
            Etd.attrib['id']=label
            if legend is not None:
                legend.a('#%s'%label,'%s'%label)
                legend.br()
        row.append(Etd)
        return Etd

    def append(self,element):
        """Append a pre-built element to this chunk."""
        self._html.append(element)
2965
2966
2967#
class htmlPage(htmlChunk):
    """
    A concrete class for generating an XHTML(1) document. Inherits from htmlChunk.

    NOTE: the ``toc`` constructor argument is currently unused.
    """
    def __init__(self,title=None,css=None,javascript=None,toc=False):
        htmlChunk.__init__(self,'html',attrib={'xmlns':"http://www.w3.org/1999/xhtml"})
        self.doctype_str='<!DOCTYPE html PUBLIC "-//W3C//DTD XHTML 1.0 Transitional//EN" "http://www.w3.org/TR/xhtml1/DTD/xhtml1-transitional.dtd">'

        # head/title/body must be created in this order so the serialized
        # document keeps the same child ordering
        self._head=SubElement(self._html,'head')
        title_elem=SubElement(self._head,'title')
        self._body=SubElement(self._html,'body')
        self._css=None
        self._jscript=None
        if title is not None:
            title_elem.text=str(title)
            self._title=SubElement(self._body,'h1')
            self._title.text=title
        if javascript is not None:
            self._jscript=SubElement(self._head,'script')
            self._jscript.attrib['type']="text/javascript"
            self._jscript.text=str(javascript)
        if css is not None:
            self._css=SubElement(self._head,'style')
            self._css.attrib['type']="text/css"
            self._css.text=str(css)

    def __str__(self):
        return '\n'.join([self.doctype_str, self.toprettyxml()])

    def add_section(self,section_name,legend=None):
        """Append a new htmlSection to the body, with optional legend link."""
        section=htmlSection(section_name)
        self._body.append(section._html)
        if legend is not None:
            legend.a('#%s'%section_name,'%s'%section_name)
            legend.br()
        return section

    def add_collapse_section(self,section_name,legend=None,innertable_id=None,start_closed=True):
        """
        Create a section embedded into a table that can be collapsed with a button
        """
        section=htmlCollapseSection(section_name,table_id=innertable_id,start_closed=start_closed)
        self._body.append(section._html)
        if legend is not None:
            legend.a('#%s'%section_name,'%s'%section_name)
            legend.br()
        return section

    def add_section_to_element(self,section_name,parent):
        """
        Create a section which is not appended to the body of html, but to the parent Element
        """
        section=htmlSection(section_name,htmlElement=parent,blank=True)
        parent.append(section._html)
        return section

    @property
    def body(self):
        return self._body

    @property
    def head(self):
        return self._head
3032
3033
class htmlSection(htmlChunk):
    """
    Represents a block of html fitting within a htmlPage. Inherits from htmlChunk.
    """
    def __init__(self,section_name,htmlElement=None,blank=False):
        # 'blank' sections use inline styling rather than the ppsection class
        if blank:
            attributes={'style':'"color:#000000"','id':section_name}
        else:
            attributes={'class':'ppsection','id':section_name}
        htmlChunk.__init__(self,'div',attrib=attributes,parent=htmlElement)
        self.h2(section_name)
3044
3045
3047 """
3048 Represents a block of html fitting within a htmlPage. Inherits from htmlChunk.
3049 """
3050
3051 def __init__(self,section_name,htmlElement=None,table_id=None,start_closed=True):
3052 htmlChunk.__init__(self,'div',attrib={'class':'ppsection','id':section_name},parent=htmlElement)
3053 # if table id is none, generate a random id:
3054 if table_id is None:
3055 table_id=random.randint(1,10000000)
3056 self.table_id=table_id
3057 self._start_closed=start_closed
3058
    def write(self,string):
        # Wrap the given html in a header table whose button toggles the
        # visibility of the inner table via the page's toggle_visibility()
        # javascript helper; 'k' is a random id for the button element.
        k=random.randint(1,10000000)
        if self._start_closed:
            # section starts collapsed: button says "Expand", inner table hidden
            st='<table border="0" align="center" cellpadding="5" cellspacing="0"><tr bgcolor="#4682B4" height="50"><td width="5%%"><font size="4" face="tahoma" color="white"><a href="#"> Top</a></font></td><td width="45%%"><font size="4" face="tahoma" color="white"><strong>%s</strong></font></td><td bgcolor="#4682B4" align="center" width="50%%"><input id="lnk%s" type="button" value="[+] Expand" onclick="toggle_visibility(\'%s\',\'lnk%s\');"></input></td></tr><tr><td colspan="7">'%(self._html.attrib['id'],k,self.table_id,k)
            string=string.replace('table ', 'table style="display: none" ')
        else:
            # section starts expanded: button says "Collapse", inner table shown
            st='<table border="0" align="center" cellpadding="5" cellspacing="0"><tr bgcolor="#4682B4" height="50"><td width="5%%"><font size="4" face="tahoma" color="white"><a href="#"> Top</a></font></td><td width="45%%"><font size="4" face="tahoma" color="white"><strong>%s</strong></font></td><td bgcolor="#4682B4" align="center" width="50%%"><input id="lnk%s" type="button" value="[-] Collapse" onclick="toggle_visibility(\'%s\',\'lnk%s\');"></input></td></tr><tr><td colspan="7">'%(self._html.attrib['id'],k,self.table_id,k)
            string=string.replace('table ', 'table style="display: table" ')
        st+=string
        st+='</td></tr></table>'
        htmlChunk.write(self,st)
3070
3071#===============================================================================
3072# Internal module functions
3073#===============================================================================
3074
3075
3076def _calculate_confidence_levels(hist, points, injBin, NSamples):
3077 """
3078 Returns (injectionconf, toppoints), where injectionconf is the
3079 confidence level of the injection, contained in the injBin and
3080 toppoints is a list of (pointx, pointy, ptindex, frac), with
3081 pointx and pointy the (x,y) coordinates of the corresponding
3082 element of the points array, ptindex the index of the point in the
3083 array, and frac the cumulative fraction of points with larger
3084 posterior probability.
3085
3086 The hist argument should be a one-dimensional array that contains
3087 counts of sample points in each bin.
3088
3089 The points argument should be a 2-D array storing the sky location
3090 associated with each bin; the first index runs from 0 to NBins -
3091 1, while the second index runs from 0 to 1.
3092
3093 The injBin argument gives the bin index in which the injection is
3094 found.
3095
3096 The NSamples argument is used to normalize the histogram counts
3097 into fractional probability.
3098 """
3099
3100 histIndices=np.argsort(hist)[::-1] # In decreasing order
3101
3102 toppoints=[]
3103 frac=0.0
3104 injConf=None
3105 for i in histIndices:
3106 frac+=float(hist[i])/float(NSamples)
3107 toppoints.append((points[i,0], points[i,1], i, frac))
3108 if i == injBin:
3109 injConf=frac
3110 print('Injection found at confidence level %g'%injConf)
3111
3112 return (injConf, toppoints)
3113
def _greedy_bin(greedyHist,greedyPoints,injection_bin_index,bin_size,Nsamples,confidence_levels):
    """
    An internal function representing the common, dimensionally-independent part of the
    greedy binning algorithms.

    Returns (toppoints, injectionconfidence, reses, injection_area) where
    reses maps each requested confidence level to the interval/area
    (number of bins times bin_size) needed to reach it.
    """

    #Rank bins and compute the cumulative confidence of each one
    (injectionconfidence,toppoints)=_calculate_confidence_levels(greedyHist, greedyPoints, injection_bin_index, Nsamples)

    #Determine interval/area contained within given confidence intervals
    nBins=0
    confidence_levels.sort()
    reses={}
    toppoints=np.array(toppoints)
    for printcl in confidence_levels:
        nBins=np.searchsorted(toppoints[:,3], printcl) + 1

        if nBins >= len(toppoints):
            nBins=len(toppoints)-1

        # (removed unused local 'accl' that was computed here)
        reses[printcl]=nBins*bin_size

    #Find area. Bug fix: bin index 0 is a valid injection bin, so test
    #explicitly against None instead of relying on truthiness.
    injection_area=None
    if injection_bin_index is not None and injectionconfidence is not None:
        i=list(np.nonzero(np.asarray(toppoints)[:,2]==injection_bin_index))[0]
        injection_area=bin_size*(i+1)

    return toppoints,injectionconfidence,reses,injection_area
3145#
3146#### functions used in 2stage kdtree
3147
def skyArea(bounds):
    """
    Return the sky area of the (ra, dec) box ``bounds`` given as
    [[ra_min, dec_min], [ra_max, dec_max]], in steradians.
    """
    (ra_min, dec_min), (ra_max, dec_max) = bounds
    # integral of cos(dec) over the box: difference of cos(pi/2 - dec)
    # terms times the RA width (sign arranged so the result is positive)
    return (cos(pi_constant/2. - dec_max) - cos(pi_constant/2. - dec_min))*(ra_max - ra_min)
3150
def random_split(items, fraction):
    """
    Randomly partition ``items`` into two lists of sizes
    (len*fraction, remainder).

    Bug fix: previously shuffled the caller's list in place; now a copy
    is shuffled so the input is left untouched.
    """
    size = int(len(items)*fraction)
    shuffled = list(items)
    random.shuffle(shuffled)
    return shuffled[:size], shuffled[size:]
3155
def addSample(tree,coordinates):
    """
    Walk down a KDSkeleton following its split decisions and record one
    sample in the leaf that contains ``coordinates``.
    """
    node = tree
    # a node with no left child is a leaf
    while node._left is not None:
        if coordinates[node._splitDim] < node._splitValue:
            node = node._left
        else:
            node = node._right
    node.addSample()
3163
3164#
3165#===============================================================================
3166# Public module functions
3167#===============================================================================
3168
def kdtree_bin_sky_volume(posterior,confidence_levels):
    """
    Bin posterior samples in (ra, dec, dist) with a kD-tree and return a
    dictionary mapping accumulated probability to the enclosed sky
    volume for each requested confidence level.
    """
    confidence_levels.sort()

    class Harvester(list):
        """Collects [density, volume, bounds] for every visited node."""

        def __init__(self):
            list.__init__(self)
            self.unrho=0.

        def __call__(self,tree):
            node_density=float(len(tree.objects()))/float(tree.volume())
            self.append([node_density,tree.volume(),tree.bounds()])
            self.unrho+=node_density

        def close_ranks(self):
            # normalise densities so they sum to one, then rank ascending
            for entry in self:
                entry[0]/=self.unrho
            return sorted(self,key=itemgetter(0))

    def noop(a,b):
        pass

    samples,header=posterior.samples()
    header=header.split()
    coordinatized_samples=[PosteriorSample(row, header, ["ra","dec","dist"]) for row in samples]
    tree=KDTree(coordinatized_samples)

    harvester=Harvester()
    # visit leaves holding at most 10 samples each
    tree.operate(harvester,noop,boxing=10)

    ranked=harvester.close_ranks()
    ranked.reverse()

    # accumulate density/volume from densest to sparsest node
    acc_rho=0.
    acc_vol=0.
    cl_idx=0
    confidence_intervals={}
    for rho,vol,bounds in ranked:
        acc_rho+=rho
        acc_vol+=vol

        if acc_rho>confidence_levels[cl_idx]:
            confidence_intervals[acc_rho]=acc_vol
            cl_idx+=1
            if cl_idx==len(confidence_levels):
                break

    return confidence_intervals
3222
def kdtree_bin_sky_area(posterior,confidence_levels,samples_per_bin=10):
    """
    takes samples and applies a KDTree to them to return confidence levels
    returns confidence_intervals - dictionary of user_provided_CL:calculated_area
            b - ordered list of KD leaves
            injInfo - if injection values provided then returns
                      [Bounds_of_inj_kd_leaf ,number_samples_in_box, weight_of_box,injection_CL ,injection_CL_area]
    Not quite sure that the repeated samples case is fixed, possibility of infinite loop.
    """
    confidence_levels.sort()
    from math import cos, pi
    class Harvester(list):
        """
        when called by kdtree.operate will be used to calculate the density of each bin (sky area)
        """
        def __init__(self):
            list.__init__(self)
            self.unrho=0.

        def __call__(self,tree):
            # sky area of the node's (ra, dec) bounding box
            area = - (cos(pi/2. - tree.bounds()[0][1])-cos(pi/2. - tree.bounds()[1][1]))*(tree.bounds()[1][0] - tree.bounds()[0][0])
            number_density=float(len(tree.objects()))/float(area)
            self.append([number_density,len(tree.objects()),area,tree.bounds()])
            self.unrho+=number_density

        def close_ranks(self):
            # normalise densities so that they sum to unity
            for i in range(len(self)):
                self[i][0]/=self.unrho

            return sorted(self,key=itemgetter(0))

    def h(a,b):
        pass

    # (removed unused local 'peparser=PEOutputParser('common')' that
    # instantiated a parser without ever using it)

    samples,header=posterior.samples()
    header=header.split()
    coord_names=["ra","dec"]
    initial_dimensions = [[0.,-pi/2.],[2.*pi, pi/2.]]
    coordinatized_samples=[PosteriorSample(row, header, coord_names) for row in samples]
    tree=KDTreeVolume(coordinatized_samples,initial_dimensions)

    a=Harvester()
    tree.operate(a,h,boxing=samples_per_bin)
    totalSamples = len(tree.objects())
    b=a.close_ranks()
    b.reverse()
    # replace per-leaf sample counts with cumulative sample fractions
    samplecounter=0.0
    for entry in b:
        samplecounter += entry[1]
        entry[1] = float(samplecounter)/float(totalSamples)

    acc_rho=0.
    acc_vol=0.
    cl_idx=0

    #checks for injection and extract details of the node in the tree that the injection is found
    if posterior['ra'].injval is not None and posterior['dec'].injval is not None:
        injBound,injNum,injWeight = tree.search([posterior['ra'].injval,posterior['dec'].injval],boxing = samples_per_bin)
        injInfo = [injBound,injNum,injWeight]
        inj_area = - (cos(pi/2. - injBound[0][1])-cos(pi/2. - injBound[1][1]))*(injBound[1][0] - injBound[0][0])
        inj_number_density=float(injNum)/float(inj_area)
        inj_rho = inj_number_density / a.unrho
    else:
        injInfo = None
        inj_area = None
        inj_number_density=None
        inj_rho = None

    #finds the area contained within the confidence levels requested by user
    confidence_intervals={}
    for rho,confidence_level,vol,bounds in b:
        acc_vol+=vol

        if confidence_level>confidence_levels[cl_idx]:
            print(str(confidence_level))
            print(acc_vol)
            confidence_intervals[confidence_levels[cl_idx]]=acc_vol
            cl_idx+=1
            if cl_idx==len(confidence_levels):
                break

    acc_vol = 0.
    for rho,sample_number,vol,bounds in b:
        acc_vol+=vol
    print('total area: ' + str(acc_vol))

    #finds the confidence level of the injection and the area of the associated contained region
    inj_confidence = None
    inj_confidence_area = None
    if inj_rho is not None:
        acc_vol=0.
        for rho,confidence_level,vol,bounds in b:
            acc_vol+=vol
            if rho <= inj_rho:
                inj_confidence = confidence_level
                inj_confidence_area = acc_vol
                injInfo.append(inj_confidence)
                injInfo.append(inj_confidence_area)
                print('inj ' +str(vol))
                break
    return confidence_intervals, b, injInfo
3328
def kdtree_bin(posterior,coord_names,confidence_levels,initial_boundingbox = None,samples_per_bin = 10):
    """
    takes samples and applies a KDTree to them to return confidence levels
    returns confidence_intervals - dictionary of user_provided_CL:calculated_volume
            b - ordered list of KD leaves
            initial_boundingbox - list of lists [upperleft_coords,lowerright_coords]
            injInfo - if injection values provided then returns
                      [Bounds_of_inj_kd_leaf ,number_samples_in_box, weight_of_box,injection_CL ,injection_CL_volume]
    Not quite sure that the repeated samples case is fixed, possibility of infinite loop.
    """
    confidence_levels.sort()
    print(confidence_levels)
    class Harvester(list):
        """
        when called by kdtree.operate will be used to calculate the density of each bin
        """
        def __init__(self):
            list.__init__(self)
            self.unrho=0.

        def __call__(self,tree):
            number_density=float(len(tree.objects()))/float(tree.volume())
            self.append([number_density,len(tree.objects()),tree.volume(),tree.bounds()])
            self.unrho+=number_density

        def close_ranks(self):
            # normalise densities so that they sum to unity
            for i in range(len(self)):
                self[i][0]/=self.unrho

            return sorted(self,key=itemgetter(0))

    def h(a,b):
        pass

    # (removed unused local 'peparser=PEOutputParser('common')' that
    # instantiated a parser without ever using it)

    samples,header=posterior.samples()
    header=header.split()
    coordinatized_samples=[PosteriorSample(row, header, coord_names) for row in samples]

    #if initial bounding box is not provided, create it using max/min of sample coords.
    if initial_boundingbox is None:
        low=coordinatized_samples[0].coord()
        high=coordinatized_samples[0].coord()
        for obj in coordinatized_samples[1:]:
            low=np.minimum(low,obj.coord())
            high=np.maximum(high,obj.coord())
        initial_boundingbox = [low,high]

    tree=KDTreeVolume(coordinatized_samples,initial_boundingbox)

    a=Harvester()
    tree.operate(a,h,boxing=samples_per_bin)

    b=a.close_ranks()
    b.reverse()
    totalSamples = len(tree.objects())
    # replace per-leaf sample counts with cumulative sample fractions
    samplecounter=0.0
    for entry in b:
        samplecounter += entry[1]
        entry[1] = float(samplecounter)/float(totalSamples)

    acc_rho=0.
    acc_vol=0.
    cl_idx=0

    #check that there is an injection value for all dimension names
    def checkNone(listoParams):
        for param in listoParams:
            if posterior[param].injval is None:
                return False
        return True

    #checks for injection and extract details of the node in the tree that the injection is found
    if checkNone(coord_names):
        injBound,injNum,injWeight = tree.search([posterior[x].injval for x in coord_names],boxing = samples_per_bin)
        injInfo = [injBound,injNum,injWeight]
        #calculate volume of injections bin
        inj_volume = 1.
        low = injBound[0]
        high = injBound[1]
        for aCoord,bCoord in zip(low,high):
            inj_volume = inj_volume*(bCoord - aCoord)
        inj_number_density=float(injNum)/float(inj_volume)
        inj_rho = inj_number_density / a.unrho
        print(injNum,inj_volume,inj_number_density,a.unrho,injBound)
    else:
        injInfo = None
        inj_area = None
        inj_number_density=None
        inj_rho = None

    #finds the volume contained within the confidence levels requested by user
    confidence_intervals={}
    for rho,sample_number,vol,bounds in b:
        acc_vol+=vol

        if sample_number>confidence_levels[cl_idx]:
            confidence_intervals[confidence_levels[cl_idx]]=(acc_vol,sample_number)
            cl_idx+=1
            if cl_idx==len(confidence_levels):
                break

    acc_vol = 0.
    for rho,sample_number,vol,bounds in b:
        acc_vol+=vol

    #finds the confidence level of the injection and the volume of the associated contained region
    inj_confidence = None
    inj_confidence_area = None
    if inj_rho is not None:
        print('calculating cl')
        acc_vol=0.
        for rho,confidence_level,vol,bounds in b:
            acc_vol+=vol
            if rho <= inj_rho:
                inj_confidence = confidence_level
                inj_confidence_area = acc_vol
                injInfo.append(inj_confidence)
                injInfo.append(inj_confidence_area)
                break

    return confidence_intervals, b, initial_boundingbox,injInfo
3453
def kdtree_bin2Step(posterior,coord_names,confidence_levels,initial_boundingbox = None,samples_per_bin = 10,injCoords = None,alternate = False, fraction = 0.5, skyCoords=False):
    """
    Compute greedy credible regions via a two-stage kd-tree binning: one
    subset of the posterior samples defines the tree geometry, the rest are
    then dropped into the resulting leaves and counted.

    input: posterior class instance, list of confidence levels, optional choice of inital parameter space, samples per box in kdtree
    note initial_boundingbox is [[lowerbound of each param][upper bound of each param]], if not specified will just take limits of samples
    fraction is proportion of samples used for making the tree structure.
    returns: confidence_intervals, sorted list of kd objects, initial_boundingbox, injInfo
    where injInfo is [bounding box injection is found within, samples in said box, weighting of box (in case of repeated samples),inj_confidence, inj_confidence_area]

    NOTE(review): the actual return statement yields
    (sortedLeavesList, interpConfAreas, injInfo), not the tuple described
    above -- confirm against callers before trusting this docstring.
    """
    # Sort in place so credible levels are processed smallest-first below.
    confidence_levels.sort()

    samples,header=posterior.samples()
    numberSamples = len(samples)
    # Split the samples: one part builds the tree, the other fills it.
    if alternate == False:
        samplesStructure, samplesFill = random_split(samples,fraction)
    else:
        # Deterministic split: the first `fraction` of the chain builds the tree.
        samplesStructure = samples[:int(numberSamples*fraction)]
        samplesFill = samples[int(numberSamples*fraction):]
    samplesFillLen = len(samplesFill)

    header=header.split()
    coordinatized_samples=[PosteriorSample(row, header, coord_names) for row in samplesStructure]
    #if initial bounding box is not provided, create it using max/min of sample coords.
    if skyCoords == True:
        # Fixed full-sky box in (RA, dec) radians; overrides any supplied box.
        initial_boundingbox = [[0,-pi_constant/2.],[2*pi_constant,pi_constant/2.]]
    if initial_boundingbox is None:
        low=coordinatized_samples[0].coord()
        high=coordinatized_samples[0].coord()
        for obj in coordinatized_samples[1:]:
            low=np.minimum(low,obj.coord())
            high=np.maximum(high,obj.coord())
        initial_boundingbox = [low,high]
    tree=KDTreeVolume(coordinatized_samples,initial_boundingbox)
    tree2fill = tree.fillNewTree(boxing=samples_per_bin, isArea = skyCoords)#set isArea True if looking at sky coords(modifies stored volume values

    # Column indices of the requested coordinates within each sample row.
    columns = []
    for name in coord_names:
        columns.append(header.index(name))

    # Drop the second batch of samples into the already-built tree.
    for sample in samplesFill:
        tempSample=[]
        for column in columns:
            tempSample.append(sample[column])
        addSample(tree2fill,tempSample)

    def getValues(tree,listing):
        # Recursively collect [bounds, importance, n_samples, volume] per leaf.
        if tree._left is None:
            listing.append([tree.bounds(),tree._importance,tree._samples,tree._volume])
        else:
            getValues(tree._left,listing)
            getValues(tree._right,listing)

    listLeaves = []
    getValues(tree2fill,listLeaves)

    # Target cumulative sample counts for each requested credible level.
    clSamples = []
    for cl in confidence_levels:
        clSamples.append(samplesFillLen*cl)

    # Greedy ordering: highest-importance leaves first.
    sortedLeavesList = sorted(listLeaves, key=lambda importance: importance[1])
    sortedLeavesList.reverse()
    runningTotalSamples = 0
    for i in range(len(sortedLeavesList)):
        runningTotalSamples += sortedLeavesList[i][2]
        # Append the cumulative sample fraction up to (and including) this leaf.
        sortedLeavesList[i].append(float(runningTotalSamples)/samplesFillLen,)


    level = 0
    countSamples = 0
    volume = 0
    lencl = len(clSamples)
    #finds confidence levels
    confidence_intervals={}
    interpConfAreas = {}
    countLeaves = 0
    for leaf in sortedLeavesList:
        countSamples += leaf[2]
        countLeaves += 1
        volume += leaf[3]
        if level < lencl and countSamples >= clSamples[level]:
            # Volume and achieved fraction at the first leaf crossing this level...
            confidence_intervals[confidence_levels[level]]=(volume,float(countSamples)/samplesFillLen)
            # ...and a linearly interpolated area that removes the overshoot
            # contributed by the final leaf.
            interpConfAreas[confidence_levels[level]] = volume-leaf[3]*(countSamples-clSamples[level])/leaf[2]
            level +=1

    if injCoords is not None:
        injBound,injNum,injImportance = tree2fill.search(injCoords)
        injInfo = [injBound,injNum,injImportance]
    else:
        injInfo = None


    #finds the confidence level of the injection and the volume of the associated contained region
    inj_confidence = None
    inj_confidence_area = None
    if injInfo is not None:
        acc_vol=0.
        acc_cl=0.
        # Walk leaves in greedy order until importance drops to the
        # injection's leaf; the accumulated count/volume give its level.
        for leaf in sortedLeavesList:
            acc_vol+=leaf[3]
            acc_cl+=leaf[2]
            if leaf[1] <= injImportance:
                inj_confidence = float(acc_cl)/samplesFillLen
                inj_confidence_area = acc_vol
                injInfo.append(inj_confidence)
                injInfo.append(inj_confidence_area)
                break

    return sortedLeavesList, interpConfAreas, injInfo
    #check that there is an injection value for all dimension names
    # NOTE(review): dead code -- defined after the unconditional return above
    # and never reachable or called from within this function.
    def checkNone(listoParams):
        for param in listoParams:
            if posterior[param].injval is None:
                return False
        return True
3567
def greedy_bin_two_param(posterior,greedy2Params,confidence_levels):
    """
    Determine the 2-parameter Bayesian Confidence Intervals using a greedy
    binning algorithm.

    @param posterior: an instance of the Posterior class.

    @param greedy2Params: a dict - {param1Name:param1binSize,param2Name:param2binSize} .

    @param confidence_levels: A list of floats of the required confidence intervals [(0-1)].

    Returns the (toppoints, injection_cl, reses, injection_area) tuple
    produced by _greedy_bin.
    """

    #Extract parameter names
    par1_name,par2_name=greedy2Params.keys()

    #Set posterior array columns
    par1pos=posterior[par1_name.lower()].samples
    par2pos=posterior[par2_name.lower()].samples

    #Extract bin sizes
    par1_bin=greedy2Params[par1_name]
    par2_bin=greedy2Params[par2_name]

    #Extract injection information
    par1_injvalue=posterior[par1_name.lower()].injval
    par2_injvalue=posterior[par2_name.lower()].injval

    #Create 2D bin array from the sample ranges
    par1pos_min=min(par1pos)[0]
    par2pos_min=min(par2pos)[0]

    par1pos_max=max(par1pos)[0]
    par2pos_max=max(par2pos)[0]

    par1pos_Nbins= int(ceil((par1pos_max - par1pos_min)/par1_bin))+1

    par2pos_Nbins= int(ceil((par2pos_max - par2pos_min)/par2_bin))+1

    greedyHist = np.zeros(par1pos_Nbins*par2pos_Nbins,dtype='i8')
    greedyPoints = np.zeros((par1pos_Nbins*par2pos_Nbins,2))

    #Fill bin values: coordinates of the lower-left corner of every bin,
    #flattened row-major with par1 varying fastest.
    par2_point=par2pos_min
    for i in range(par2pos_Nbins):

        par1_point=par1pos_min
        for j in range(par1pos_Nbins):

            greedyPoints[j+par1pos_Nbins*i,0]=par1_point
            greedyPoints[j+par1pos_Nbins*i,1]=par2_point
            par1_point+=par1_bin
        par2_point+=par2_bin


    #If injection point given find which bin its in...
    injbin=None
    if par1_injvalue is not None and par2_injvalue is not None:

        par1_binNumber=int(floor((par1_injvalue-par1pos_min)/par1_bin))
        par2_binNumber=int(floor((par2_injvalue-par2pos_min)/par2_bin))

        injbin=int(par1_binNumber+par2_binNumber*par1pos_Nbins)
    elif par1_injvalue is None and par2_injvalue is not None:
        print("Injection value not found for %s!"%par1_name)

    elif par1_injvalue is not None and par2_injvalue is None:
        print("Injection value not found for %s!"%par2_name)

    #Bin posterior samples
    for par1_samp,par2_samp in zip(par1pos,par2pos):
        par1_samp=par1_samp[0]
        par2_samp=par2_samp[0]
        par1_binNumber=int(floor((par1_samp-par1pos_min)/par1_bin))
        par2_binNumber=int(floor((par2_samp-par2pos_min)/par2_bin))
        try:
            greedyHist[par1_binNumber+par2_binNumber*par1pos_Nbins]+=1
        except IndexError:
            # BUGFIX: the format arguments used to be applied OUTSIDE the
            # RuntimeError constructor (``raise RuntimeError("...")%(...)``),
            # which raised a TypeError and hid the diagnostic; the 9th
            # argument was also par1_samp where par2_samp belongs, and the
            # bare ``except:`` masked unrelated errors.
            raise RuntimeError("Problem binning samples: %i,%i,%i,%i,%i,%f,%f,%f,%f,%f,%f ."%(par1_binNumber,par2_binNumber,par1pos_Nbins,par2pos_Nbins,par1_binNumber+par2_binNumber*par1pos_Nbins,par1_samp,par1pos_min,par1_bin,par2_samp,par2pos_min,par2_bin))
    #Call greedy bins routine
    toppoints,injection_cl,reses,injection_area=\
        _greedy_bin(
                    greedyHist,
                    greedyPoints,
                    injbin,
                    float(par1_bin*par2_bin),
                    int(len(par1pos)),
                    confidence_levels
                    )

    return toppoints,injection_cl,reses,injection_area
3659
def pol2cart(long,lat):
    """
    Convert (longitude, latitude) on the unit sphere into a Cartesian
    3-vector [x, y, z].
    """
    cos_lat = np.cos(lat)
    return np.array([cos_lat * np.cos(long),
                     cos_lat * np.sin(long),
                     np.sin(lat)])
3670#
3671
def sph2cart(r,theta,phi):
    """
    Convert spherical coordinates (r, theta, phi) to Cartesian (x, y, z).
    theta is the polar angle measured from the +z axis.
    """
    sin_theta = np.sin(theta)
    return (r * sin_theta * np.cos(phi),
            r * sin_theta * np.sin(phi),
            r * np.cos(theta))
3680
3681
def cart2sph(x,y,z):
    """
    Convert Cartesian coordinates (x, y, z) to spherical (r, theta, phi),
    with theta the polar angle and phi wrapped into [0, 2*pi).
    """
    radius = np.sqrt(x*x + y*y + z*z)
    polar = np.arccos(z/radius)
    azimuth = np.fmod(2*pi_constant + np.arctan2(y,x), 2*pi_constant)
    return radius, polar, azimuth
3691
3692
3693
def plot_sky_map(hpmap, outdir, inj=None, nest=True):
    """Plot a healpix sky map as a Mollweide projection and save it to
    ``<outdir>/skymap.png``. This is a temporary map to display before the
    ligo.skymap utility is used to generate a smoother one.

    :param hpmap: An array representing a healpix map (in nested
        ordering if ``nest = True``).

    :param outdir: The output directory.

    :param inj: If not ``None``, then ``[ra, dec]`` of the injection
        associated with the posterior map; drawn as a star marker.

    :param nest: Flag indicating the pixel ordering in healpix.

    """
    fig = plt.figure(frameon=False, figsize=(8,6))
    hp.mollview(hpmap, nest=nest, min=0, max=np.max(hpmap), cmap='Greys', coord='E', fig=fig.number, title='Histogrammed skymap' )
    plt.grid(True,color='g',figure=fig)

    if inj is not None:
        ra, dec = inj[0], inj[1]
        # healpy wants colatitude, not declination.
        colatitude = np.pi/2.0 - dec
        hp.projplot(colatitude, ra, '*', markerfacecolor='white', markeredgecolor='black', markersize=10)

    plt.savefig(os.path.join(outdir, 'skymap.png'))
    return fig
3722
def skymap_confidence_areas(hpmap, cls):
    """Return the sky area (in square degrees) enclosed at each requested
    credible level, using greedy binning of the given healpix map.

    """
    # Normalise, then accumulate pixels from most to least probable.
    normed = hpmap / np.sum(hpmap)
    descending = np.sort(normed)[::-1]
    cumulative = np.cumsum(descending)

    # Per-pixel area converted from steradians to square degrees.
    pix_sqdeg = hp.nside2pixarea(hp.npix2nside(descending.shape[0])) * (180.0/np.pi)**2

    # Number of greedy pixels needed before each level is reached.
    return np.array([np.sum(cumulative < cl) * pix_sqdeg for cl in cls])
3743
def skymap_inj_pvalue(hpmap, inj, nest=True):
    """Return the greedy p-value estimate for the injected ``[ra, dec]``
    position: the total probability of all pixels at least as likely as
    the injection's pixel.

    """
    nside = hp.npix2nside(hpmap.shape[0])
    # Normalise so the map sums to one.
    normed = hpmap / np.sum(hpmap)

    # Pixel containing the injection (colatitude = pi/2 - dec).
    pix = hp.ang2pix(nside, np.pi/2.0-inj[1], inj[0], nest=nest)
    threshold = normed[pix]

    return np.sum(normed[normed >= threshold])
3756
3757#
3758
def mc2ms(mc,eta):
    """
    Convert chirp mass and symmetric mass ratio to component masses,
    defined so that m1 > m2. Returns the tuple (m1, m2).
    """
    delta = np.sqrt(0.25 - eta)
    # ratio = m1/m2 expressed through the mass difference.
    ratio = (0.5 + delta) / (0.5 - delta)
    inv_ratio = 1/ratio

    m2 = mc * np.power(1 + ratio, 0.2) / np.power(ratio, 0.6)
    m1 = mc * np.power(1 + inv_ratio, 0.2) / np.power(inv_ratio, 0.6)
    return (m1, m2)
3772#
3773#
3774
def q2ms(mc,q):
    """
    Convert chirp mass and mass ratio q (= m2/m1 <= 1) to component
    masses, defined so that m1 > m2. Returns the tuple (m1, m2).
    """
    common = mc * np.power(1+q, 1.0/5.0)
    return (common * np.power(q, -3.0/5.0),
            common * np.power(q, 2.0/5.0))
3784#
3785#
3786
def q2eta(q):
    """
    Convert mass ratio q to symmetric mass ratio eta = q/(1+q)^2,
    capped at the physical maximum of 0.25 to absorb floating-point
    round-off.
    """
    raw = q / ((1+q) * (1+q))
    return np.clip(raw, 0, 0.25)
3794#
3795#
3796
def mc2q(mc,eta):
    """
    Convert chirp mass and symmetric mass ratio to the mass ratio
    q = m2/m1 (so q <= 1).
    """
    m1, m2 = mc2ms(mc, eta)
    return m2 / m1
3804#
3805#
3806
def ang_dist(long1,lat1,long2,lat2):
    """
    Find the angular separation of (long1,lat1) and (long2,lat2), which are
    specified in radians.
    """

    # Unit vectors on the sphere for each position.
    x1=np.cos(lat1)*np.cos(long1)
    y1=np.cos(lat1)*np.sin(long1)
    z1=np.sin(lat1)
    x2=np.cos(lat2)*np.cos(long2)
    y2=np.cos(lat2)*np.sin(long2)
    z2=np.sin(lat2)
    # BUGFIX: np.acos is not a numpy function (the name is np.arccos), so
    # this line raised AttributeError whenever it ran. The dot product is
    # also clipped to [-1, 1] to guard against round-off producing NaN for
    # coincident or antipodal points.
    sep=np.arccos(np.clip(x1*x2+y1*y2+z1*z2,-1.0,1.0))
    return(sep)
3821#
3822#
3823
def array_dot(vec1, vec2):
    """
    Dot product of two vectors, or row-wise dot products of two arrays of
    vectors (returned as a column vector).
    """
    elementwise = vec1 * vec2
    if vec1.ndim == 1:
        return elementwise.sum()
    return elementwise.sum(axis=1).reshape(-1, 1)
3833#
3834#
3835
def array_ang_sep(vec1, vec2):
    """
    Angle between two vectors, or row-wise angles between two arrays of
    vectors.
    """
    norm1 = np.sqrt(array_dot(vec1, vec1))
    norm2 = np.sqrt(array_dot(vec2, vec2))
    cosine = array_dot(vec1, vec2) / (norm1 * norm2)
    return np.arccos(cosine)
3843#
3844#
3845
def array_polar_ang(vec):
    """
    Polar angle of a vector, or row-wise polar angles of an array of
    vectors (angle from the +z axis).
    """
    z_component = vec[2] if vec.ndim == 1 else vec[:,2].reshape(-1,1)
    magnitude = np.sqrt(array_dot(vec, vec))
    return np.arccos(z_component / magnitude)
3856#
3857#
3858
def rotation_matrix(angle, direction):
    """
    Compute rotation matrices for the given angle(s) about the given
    direction vector(s).

    @param angle: a scalar angle in radians, or an array of angles (one
        rotation matrix is built per angle).
    @param direction: the rotation axis (normalised internally); a
        3-vector, or an array of row 3-vectors matching the angles.

    Returns a single 3x3 matrix for scalar input, or an (N,3,3) array of
    matrices for array input.
    """
    cosa = np.cos(angle)
    sina = np.sin(angle)
    # BUGFIX: normalise a copy of the axis. The previous in-place ``/=``
    # silently modified the caller's array.
    direction = direction / np.sqrt(array_dot(direction,direction))
    #Assume calculating array of rotation matrices.
    try:
        nSamps = len(angle)
        # Rodrigues' formula, term by term: cos(a)*I ...
        R = np.array( [np.diag([i,i,i]) for i in cosa.flat] )
        # ... + (1-cos(a)) * (d outer d) ...
        R += np.array( [np.outer(direction[i],direction[i])*(1.0-cosa[i]) for i in range(nSamps)] )
        # ... + sin(a) * skew(d).
        R += np.array( [np.array( [[ 0.0, -direction[i,2], direction[i,1]],
                                   [ direction[i,2], 0.0, -direction[i,0]],
                                   [-direction[i,1], direction[i,0], 0.0 ]] ) * sina[i] for i in range(nSamps)] )
    #Only computing one rotation matrix.
    except TypeError:
        R = np.diag([cosa,cosa,cosa])
        R += np.outer(direction,direction) * (1.0 - cosa)
        R += np.array( [[ 0.0, -direction[2], direction[1]],
                        [ direction[2], 0.0, -direction[0]],
                        [-direction[1], direction[0], 0.0 ]] ) * sina
    return R
3882
3883
def ROTATEZ(angle, vx, vy, vz):
    # Active rotation about the z-axis; mirrors ROTATEZ in LALSimInspiral.c.
    c, s = np.cos(angle), np.sin(angle)
    return np.asarray([vx*c - vy*s, vx*s + vy*c, vz])
3889
def ROTATEY(angle, vx, vy, vz):
    # Active rotation about the y-axis; mirrors ROTATEY in LALSimInspiral.c.
    c, s = np.cos(angle), np.sin(angle)
    return np.asarray([vx*c + vz*s, vy, vz*c - vx*s])
3895
def orbital_momentum(fref, m1,m2, inclination):
    """
    Calculate the orbital angular momentum vector at reference frequency
    fref, tilted by the inclination angle.
    Note: The units of Lmag are different than what used in lalsimulation.
    Mc must be called in units of Msun here.

    Note that if one wants to build J=L+S1+S2 with L returned by this function, S1 and S2
    must not get the Msun^2 factor.
    """
    total_mass = m1 + m2
    eta = m1*m2/(total_mass*total_mass)
    magnitude = orbital_momentum_mag(fref, m1,m2,eta)
    # Place L in the x-z plane, tilted from z by the inclination.
    components = sph2cart(magnitude, inclination, 0.0)
    return np.hstack(components)
3909#
3910#
def orbital_momentum_mag(fref, m1,m2,eta):
    """
    Magnitude of the orbital angular momentum at reference frequency fref,
    to 1PN order.
    """
    total = m1 + m2
    # PN orbital velocity parameter v0 = (M * pi * M_sun[s] * fref)^(1/3).
    v0 = np.power(total * pi_constant * lal.MTSUN_SI * fref, 1.0/3.0)
    # Leading order Mtot^2 * eta / v, with the 1PN correction factor.
    leading = (total**2) * eta / v0
    correction = 1 + (v0**2) * (3.0/2.0 + eta/6.0)
    return leading * correction
3918
def component_momentum(m, a, theta, phi):
    """
    Calculate a BH spin angular momentum vector of magnitude m^2 * a
    oriented along (theta, phi).
    """
    return np.hstack(sph2cart(m**2 * a, theta, phi))
3925#
3926#
3927
def symm_tidal_params(lambda1,lambda2,q):
    """
    Calculate the leading- and subleading-order tidal parameters
    (lam_tilde, dlam_tilde) [Eqs. (5) and (6) in Wade et al.
    PRD 89, 103012 (2014)].
    Requires q <= 1.
    """
    # Enforce the q <= 1 convention the coefficients assume.
    if np.any(q > 1):
        raise ValueError("q > 1, while this function requires q <= 1.")

    lam_sum = lambda1 + lambda2
    lam_diff = lambda1 - lambda2

    # (m1-m2)/M; equivalent to sqrt(1 - 4*eta) for q <= 1.
    delta = (1. - q)/(1. + q)
    eta = q2eta(q)

    lam_tilde = (8./13.)*((1.+7.*eta-31.*eta*eta)*lam_sum + delta*(1.+9.*eta-11.*eta*eta)*lam_diff)
    dlam_tilde = (1./2.)*(delta*(1.-13272.*eta/1319.+8944.*eta*eta/1319.)*lam_sum + (1.-15910.*eta/1319.+32850.*eta*eta/1319.+3380.*eta*eta*eta/1319.)*lam_diff)
    return lam_tilde, dlam_tilde
3947
def spin_angles(fref,mc,eta,incl,a1,theta1,phi1,a2=None,theta2=None,phi2=None):
    """
    Calculate the physical spin angles (tilt1, tilt2, theta_jn, beta) from
    component spins and the orbital angular momentum. When the second
    spin is not supplied, tilt2 is returned as None.
    """
    singleSpin = None in (a2,theta2,phi2)
    m1, m2 = mc2ms(mc,eta)
    # Orbital angular momentum defines the reference direction.
    L = orbital_momentum(fref, m1,m2, incl)
    S1 = component_momentum(m1, a1, theta1, phi1)
    S2 = 0.0 if singleSpin else component_momentum(m2, a2, theta2, phi2)
    # Total angular momentum.
    J = L + S1 + S2
    tilt1 = array_ang_sep(L,S1)
    tilt2 = None if singleSpin else array_ang_sep(L,S2)
    theta_jn = array_polar_ang(J)
    beta = array_ang_sep(J,L)
    return tilt1, tilt2, theta_jn, beta
3969#
def chi_precessing(m1, a1, tilt1, m2, a2, tilt2):
    """
    Calculate the magnitude of the effective precessing spin
    following convention from Phys. Rev. D 91, 024043 -- arXiv:1408.1810
    note: the paper uses naming convention where m1 < m2
    (and similar for associated spin parameters) and q > 1
    """
    q_inv = m1/m2
    coeff1 = 2. + (3.*q_inv/2.)
    coeff2 = 2. + 3./(2.*q_inv)
    # In-plane spin components of each body.
    s1_perp = a1*np.sin(tilt1)*m1*m1
    s2_perp = a2*np.sin(tilt2)*m2*m2
    sp = np.maximum(coeff1*s2_perp, coeff2*s1_perp)
    return sp/(coeff2*m1*m1)
3985
def calculate_redshift(distance,h=0.6790,om=0.3065,ol=0.6935,w0=-1.0):
    """
    Calculate the redshift from the luminosity distance measurement using the
    Cosmology Calculator provided in LAL.
    By default assuming cosmological parameters from arXiv:1502.01589 - 'Planck 2015 results. XIII. Cosmological parameters'
    Using parameters from table 4, column 'TT+lowP+lensing+ext'
    This corresponds to Omega_M = 0.3065, Omega_Lambda = 0.6935, H_0 = 67.90 km s^-1 Mpc^-1
    Returns an array of redshifts
    """
    def _residual(z,dl,omega):
        # Root of this function in z gives dl = D_L(z).
        return dl - lal.LuminosityDistance(omega,z)

    omega = lal.CreateCosmologicalParameters(h,om,ol,w0,0.0,0.0)
    if isinstance(distance,float):
        roots = [newton(_residual,np.random.uniform(0.0,2.0),args = (distance,omega))]
    else:
        # One root-find per sample; random starting point in [0, 2].
        roots = [newton(_residual,np.random.uniform(0.0,2.0),args = (d,omega)) for d in distance[:,0]]
    z = np.array(roots)
    return z.reshape(z.shape[0],1)
4004
def source_mass(mass, redshift):
    """
    Return the source-frame mass for a redshifted (detector-frame) mass:
    m_source = m / (1.0 + z).
    """
    redshift_factor = 1.0 + redshift
    return mass / redshift_factor
4012
4013## Following functions added for testing Lorentz violations
def integrand_distance(redshift,nonGR_alpha):
    """
    Integrand of the D_alpha integral (multiplicative factors applied by
    the caller):
    D_alpha = integral{ ((1+z')^(alpha-2))/sqrt(Omega_m*(1+z')^3 +Omega_lambda) dz'} # eq.15 of arxiv 1110.2720
    """
    omega = lal.CreateCosmologicalParameters(0.6790,0.3065,0.6935,-1.0,0.0,0.0) ## Planck 2015 values
    matter_density = omega.om
    dark_energy_density = omega.ol
    #lal.DestroyCosmologicalParameters(omega)
    return (1.0+redshift)**(nonGR_alpha-2.0)/(np.sqrt(matter_density*(1.0+redshift)**3.0 + dark_energy_density))
4024
def DistanceMeasure(redshift,nonGR_alpha):
    """
    D_alpha = ((1+z)^(1-alpha))/H_0 * D_alpha # from eq.15 of arxiv 1110.2720
    with the integral evaluated by integrand_distance above.
    Returns D_alpha in metres.
    """
    omega = lal.CreateCosmologicalParameters(0.6790,0.3065,0.6935,-1.0,0.0,0.0) ## Planck 2015 values
    hubble = omega.h*lal.H0FAC_SI ## Hubble constant in SI units
    integral = integrate.quad(integrand_distance, 0, redshift ,args=(nonGR_alpha))[0]
    scaled = integral * (1.0 + redshift)**(1.0 - nonGR_alpha) / hubble
    #lal.DestroyCosmologicalParameters(omega)
    return scaled*lal.C_SI
4037
def lambda_a(redshift, nonGR_alpha, lambda_eff, distance):
    """
    Converting from the effective wavelength-like parameter to lambda_A:
    lambda_A = lambda_{eff}*(D_alpha/D_L)^(1/(2-alpha))*(1/(1+z)^((1-alpha)/(2-alpha)))
    """
    D_alpha = np.vectorize(DistanceMeasure)(redshift, nonGR_alpha)
    dl_metres = distance*lal.PC_SI*1e6 ## luminosity distance in metres
    exponent = 1./(2.0-nonGR_alpha)
    return lambda_eff*(D_alpha/(dl_metres*(1.0+redshift)**(1.0-nonGR_alpha)))**exponent
4047
def amplitudeMeasure(redshift, nonGR_alpha, lambda_eff, distance):
    """
    Converting to Lorentz violating parameter "A" in dispersion relation from lambda_A:
    A = (lambda_A/h)^(alpha-2) # eqn. 13 of arxiv 1110.2720
    """
    planck_ev_s = 4.13567e-15 # Planck's constant in eV.s
    # lambda_A converted to seconds before forming the ratio.
    lambdaA_sec = np.vectorize(lambda_a)(redshift, nonGR_alpha, lambda_eff, distance)/lal.C_SI
    return (lambdaA_sec/planck_ev_s)**(nonGR_alpha-2.0)
4057############################ changes for testing Lorentz violations made till here
4058
def physical2radiationFrame(theta_jn, phi_jl, tilt1, tilt2, phi12, a1, a2, m1, m2, fref,phiref):
    """
    Wrapper function for SimInspiralTransformPrecessingNewInitialConditions().
    Vectorizes function for use in append_mapping() methods of the posterior class.

    Returns (iota, theta1, phi1, theta2, phi2, beta) on success, or None
    when lalsimulation cannot be imported or the transformation fails.
    """
    try:
        import lalsimulation as lalsim
    except ImportError:
        print('bayespputils.py: Cannot import lalsimulation SWIG bindings to calculate physical to radiation')
        print('frame angles, did you remember to use --enable-swig-python when ./configuring lalsimulation?')
        return None
    from numpy import shape
    transformFunc = lalsim.SimInspiralTransformPrecessingNewInitialConditions

    # Convert component masses to SI units
    m1_SI = m1*lal.MSUN_SI
    m2_SI = m2*lal.MSUN_SI

    # Flatten arrays
    ins = [theta_jn, phi_jl, tilt1, tilt2, phi12, a1, a2, m1_SI, m2_SI, fref,phiref]
    if len(shape(ins))>1:
        # ins is a list of lists (i.e. we are converting full posterior chains)
        try:
            for p,param in enumerate(ins):
                ins[p] = param.flatten()
        except:
            # Entries without flatten() (plain scalars) are left unchanged.
            pass

        try:
            # One lalsimulation transformation call per posterior sample.
            results = np.array([transformFunc(t_jn, p_jl, t1, t2, p12, a1, a2, m1_SI, m2_SI, f,phir) for (t_jn, p_jl, t1, t2, p12, a1, a2, m1_SI, m2_SI, f,phir) in zip(*ins)])
            # Each result row is (iota, s1x, s1y, s1z, s2x, s2y, s2z);
            # reshape to column vectors for posterior bookkeeping.
            iota = results[:,0].reshape(-1,1)
            spin1x = results[:,1].reshape(-1,1)
            spin1y = results[:,2].reshape(-1,1)
            spin1z = results[:,3].reshape(-1,1)
            spin2x = results[:,4].reshape(-1,1)
            spin2y = results[:,5].reshape(-1,1)
            spin2z = results[:,6].reshape(-1,1)
            # Recover spin magnitudes and orientation angles.
            a1,theta1,phi1 = cart2sph(spin1x,spin1y,spin1z)
            a2,theta2,phi2 = cart2sph(spin2x,spin2y,spin2z)

            mc = np.power(m1*m2,3./5.)*np.power(m1+m2,-1./5.)
            # Build J = L + S1 + S2 to obtain the opening angle beta.
            L  = orbital_momentum(fref, m1,m2, iota)
            S1 = np.hstack([m1*m1*spin1x,m1*m1*spin1y,m1*m1*spin1z])
            S2 = np.hstack([m2*m2*spin2x,m2*m2*spin2y,m2*m2*spin2z])
            J = L + S1 + S2
            beta = array_ang_sep(J,L)

            return iota, theta1, phi1, theta2, phi2, beta
        except: # Catch all exceptions, including failure for the transformFunc
            # Something went wrong, returning None
            return None

    elif len(shape(ins))<=1:
        # ins is a list of floats (i.e. we are converting the injected values) or empty
        try:
            for p,param in enumerate(ins):
                ins[p] = param
        except:
            pass

        try:
            # Single transformation for injected scalar values.
            results = np.array(transformFunc(theta_jn, phi_jl, tilt1, tilt2, phi12, a1, a2, m1_SI, m2_SI, fref,phiref))
            iota = results[0]
            spin1x = results[1]
            spin1y = results[2]
            spin1z = results[3]
            spin2x = results[4]
            spin2y = results[5]
            spin2z = results[6]
            a1,theta1,phi1 = cart2sph(spin1x,spin1y,spin1z)
            a2,theta2,phi2 = cart2sph(spin2x,spin2y,spin2z)

            mc = np.power(m1*m2,3./5.)*np.power(m1+m2,-1./5.)
            L  = orbital_momentum(fref, m1,m2, iota)
            S1 = m1*m1*np.hstack([spin1x,spin1y,spin1z])
            S2 = m2*m2*np.hstack([spin2x,spin2y,spin2z])
            J = L + S1 + S2
            beta = array_ang_sep(J,L)

            return iota, theta1, phi1, theta2, phi2, beta

        except: # Catch all exceptions, including failure for the transformFunc
            # Something went wrong, returning None
            return None
4143#
4144#
4145
def plot_one_param_pdf_kde(fig,onedpos):
    """
    Overlay a 1D Gaussian kernel density estimate of the given
    one-dimensional posterior's samples on the current axes (green curve).
    Skips plotting (with a message) if the KDE covariance is singular.
    """
    from scipy import seterr as sp_seterr

    np.seterr(under='ignore')
    sp_seterr(under='ignore')
    pos_samps=onedpos.samples
    try:
        gkde=onedpos.gaussian_kde
    # BUGFIX: np.linalg.linalg was a private alias removed in recent numpy
    # releases; np.linalg.LinAlgError is the public name of the same class.
    except np.linalg.LinAlgError:
        print('Singular matrix in KDE. Skipping')
    else:
        # Evaluate the KDE on a uniform grid spanning the samples.
        ind=np.linspace(np.min(pos_samps),np.max(pos_samps),101)
        kdepdf=gkde.evaluate(ind)
        plt.plot(ind,kdepdf,color='green')
    return
4162
4163
def plot_one_param_pdf(posterior,plot1DParams,analyticPDF=None,analyticCDF=None,plotkde=False):
    """
    Plots a 1D histogram and (gaussian) kernel density estimate of the
    distribution of posterior samples for a given parameter.

    @param posterior: an instance of the Posterior class.

    @param plot1DParams: a dict; {paramName:Nbins}

    @param analyticPDF: an analytic probability distribution function describing the distribution.

    @param analyticCDF: an analytic cumulative distribution function describing the distribution.

    @param plotkde: Use KDE to smooth plot (default: False)

    Returns (rbins, myfig): the integrated probability below the injected
    value (or None if no injection in range) and the figure object.
    """

    matplotlib.rcParams['text.usetex']=False

    # Only the first (parameter, bin-count) entry of the dict is used.
    param=list(plot1DParams.keys())[0].lower()
    histbins=list(plot1DParams.values())[0]

    pos_samps=posterior[param].samples
    injpar=posterior[param].injval
    trigvals=posterior[param].trigvals

    #myfig=plt.figure(figsize=(4,3.5),dpi=200)
    myfig=plt.figure(figsize=(6,4.5),dpi=150)
    #axes=plt.Axes(myfig,[0.2, 0.2, 0.7,0.7])
    axes=plt.Axes(myfig,[0.15,0.15,0.6,0.76])
    myfig.add_axes(axes)
    majorFormatterX=ScalarFormatter(useMathText=True)
    majorFormatterX.format_data=lambda data:'%.6g'%(data)
    majorFormatterY=ScalarFormatter(useMathText=True)
    majorFormatterY.format_data=lambda data:'%.6g'%(data)
    majorFormatterX.set_scientific(True)
    majorFormatterY.set_scientific(True)
    offset=0.0
    # GPS-time-like parameters are plotted relative to an integer offset to
    # keep the tick labels readable.
    if param.find('time')!=-1:
        offset=floor(min(pos_samps))
        pos_samps=pos_samps-offset
        if injpar is not None:
            injpar=injpar-offset
        ax1_name=param+' + %i'%(int(offset))
    else: ax1_name=param

    (n, bins, patches)=plt.hist(pos_samps,histbins,density=True,facecolor='grey')
    # Choose fewer ticks when the tick labels get long.
    Nchars=max(map(lambda d:len(majorFormatterX.format_data(d)),axes.get_xticks()))
    if Nchars>8:
        Nticks=3
    elif Nchars>5:
        Nticks=4
    elif Nchars>4:
        Nticks=6
    else:
        Nticks=6
    locatorX=matplotlib.ticker.MaxNLocator(nbins=Nticks)
    xmin,xmax=plt.xlim()
    # RA/dec axes get dedicated locators/formatters (h:m:s / degrees).
    if param=='rightascension' or param=='ra':
        locatorX=RALocator(min=xmin,max=xmax)
        majorFormatterX=RAFormatter()
    if param=='declination' or param=='dec':
        locatorX=DecLocator(min=xmin,max=xmax)
        majorFormatterX=DecFormatter()
    axes.xaxis.set_major_formatter(majorFormatterX)
    axes.yaxis.set_major_formatter(majorFormatterY)

    locatorX.view_limits(bins[0],bins[-1])
    axes.xaxis.set_major_locator(locatorX)
    if plotkde: plot_one_param_pdf_kde(myfig,posterior[param])
    histbinSize=bins[1]-bins[0]
    if analyticPDF:
        # Overlay the analytic PDF (shifted back by the time offset).
        (xmin,xmax)=plt.xlim()
        x = np.linspace(xmin,xmax,2*len(bins))
        plt.plot(x, analyticPDF(x+offset), color='r', linewidth=2, linestyle='dashed')
        if analyticCDF:
            # KS underflows with too many samples
            max_samps=1000
            from numpy.random import permutation
            samps = permutation(pos_samps)[:max_samps,:].flatten()
            D,p = stats.kstest(samps+offset, analyticCDF, mode='asymp')
            plt.title("%s: ks p-value %.3f"%(param,p))

    rbins=None

    if injpar is not None:
        # We will plot the injection if it is <5% outside the posterior
        delta_samps=max(pos_samps)-min(pos_samps)
        minrange=min(pos_samps)-0.05*delta_samps
        maxrange=max(pos_samps)+0.05*delta_samps
        if minrange<injpar and maxrange>injpar:

            plt.axvline(injpar, color='r', linestyle='-.', linewidth=4)

            #rkde=gkde.integrate_box_1d(min(pos[:,i]),getinjpar(injection,i))
            #print "r of injected value of %s (kde) = %f"%(param,rkde)

        #Find which bin the true value is in
        if min(pos_samps)<injpar and max(pos_samps)>injpar:
            bins_to_inj=(injpar-bins[0])/histbinSize
            injbinh=int(floor(bins_to_inj))
            injbin_frac=bins_to_inj-float(injbinh)

            #Integrate over the bins
            # NOTE(review): the slice n[0:injbinh-1] stops one bin short of
            # the injection bin; n[0:injbinh] would include all complete
            # bins -- confirm whether the off-by-one is intentional.
            rbins=(sum(n[0:injbinh-1])+injbin_frac*n[injbinh])*histbinSize

    if trigvals is not None:
        # Mark per-IFO trigger values that fall inside the sample range.
        for IFO in [IFO for IFO in trigvals.keys()]:
            trigval = trigvals[IFO]
            if min(pos_samps)<trigval and max(pos_samps)>trigval:
                if IFO=='H1': color = 'r'
                elif IFO=='L1': color = 'g'
                elif IFO=='V1': color = 'm'
                else: color = 'c'
                plt.axvline(trigval, color=color, linestyle='-.')
    #
    plt.grid()
    plt.xlabel(plot_label(ax1_name))
    plt.ylabel('Probability Density')

    # For RA and dec set custom labels and for RA reverse
    if(param.lower()=='ra' or param.lower()=='rightascension'):
        xmin,xmax=plt.xlim()
        plt.xlim(xmax,xmin)
    #if(param.lower()=='ra' or param.lower()=='rightascension'):
    #    locs, ticks = plt.xticks()
    #    newlocs, newticks = formatRATicks(locs)
    #    plt.xticks(newlocs,newticks,rotation=45)
    #if(param.lower()=='dec' or param.lower()=='declination'):
    #    locs, ticks = plt.xticks()
    #    newlocs, newticks = formatDecTicks(locs)
    #    plt.xticks(newlocs,newticks,rotation=45)

    return rbins,myfig#,rkde
4297#
4298
class RALocator(matplotlib.ticker.MultipleLocator):
    """
    Tick locator for right-ascension axes: picks a tick spacing (in hours)
    appropriate to the displayed span.
    """
    def __init__(self,min=0.0,max=2.0*pi_constant):
        hour=pi_constant/12.0
        span = max - min
        # Coarser spans get coarser ticks, down to quarter-hour spacing.
        if span > 12.0*hour:
            base = 3.0*hour
        elif span > 6.0*hour:
            base = 2.0*hour
        elif span > 3.0*hour:
            base = hour
        elif span > hour:
            base = hour/2.0
        else:
            base = hour/4.0
        matplotlib.ticker.MultipleLocator.__init__(self,base=base)
4318
class DecLocator(matplotlib.ticker.MultipleLocator):
    """
    Tick locator for declination axes: picks a tick spacing (in degrees)
    appropriate to the displayed span.
    """
    def __init__(self, min=-pi_constant/2.0,max=pi_constant/2.0):
        deg=pi_constant/180.0
        span = max - min
        if span > 60*deg:
            base = 30.0*deg
        elif span > 20*deg:
            base = 10*deg
        elif span > 10*deg:
            base = 5*deg
        else:
            base = deg
        matplotlib.ticker.MultipleLocator.__init__(self,base=base)
4334
class RAFormatter(matplotlib.ticker.FuncFormatter):
    # Tick formatter that renders right-ascension ticks via getRAString.
    def __init__(self,accuracy='auto'):
        # NOTE: `accuracy` is accepted but not forwarded; getRAString picks
        # its own precision per tick value.
        matplotlib.ticker.FuncFormatter.__init__(self,getRAString)
4338
class DecFormatter(matplotlib.ticker.FuncFormatter):
    # Tick formatter that renders declination ticks via getDecString.
    def __init__(self,accuracy='auto'):
        # NOTE: `accuracy` is accepted but not forwarded; getDecString picks
        # its own precision per tick value.
        matplotlib.ticker.FuncFormatter.__init__(self,getDecString)
4342
4343
4344
def getRAString(radians,accuracy='auto'):
    """
    Format a right ascension (radians) as an hours/minutes/seconds string.
    With accuracy='auto', the coarsest representation that loses nothing
    visible is chosen.
    """
    total_secs = radians*12.0*3600/pi_constant
    hours, remainder = divmod(total_secs, 3600)
    mins, secs = divmod(remainder, 60)
    # Carry near-60 values upward so e.g. 59.7s renders as the next minute.
    if secs >= 59.5:
        secs = secs - 60
        mins = mins + 1
    if mins >= 59.5:
        mins = mins - 60
        hours = hours + 1
    if accuracy=='hour': return r'%ih'%(hours)
    if accuracy=='min': return r'%ih%im'%(hours,mins)
    if accuracy=='sec': return r'%ih%im%2.0fs'%(hours,mins,secs)
    # auto: recurse with the coarsest accuracy that keeps the value exact.
    if abs(fmod(secs,60.0))>=0.5:
        return getRAString(radians,accuracy='sec')
    if abs(fmod(mins,60.0))>=0.5:
        return getRAString(radians,accuracy='min')
    return getRAString(radians,accuracy='hour')
4363
def getDecString(radians,accuracy='auto'):
    """
    Format a declination (radians) as a degrees/arcmin/arcsec string.
    With accuracy='auto', values are rendered in whole degrees.
    """
    # LaTeX builds reject the raw unicode degree/quote glyphs.
    if matplotlib.rcParams['text.usetex']:
        degsymb='$^\\circ$'
        minsymb="'"
        secsymb="''"
    else:
        degsymb=chr(0x0B0)
        minsymb=chr(0x027)
        secsymb=chr(0x2033)
    # Work with the magnitude; reapply the sign when formatting.
    sign = -1 if radians < 0 else +1
    radians = abs(radians)
    deg, rem = divmod(radians, pi_constant/180.0)
    mins, rem = divmod(rem, pi_constant/(180.0*60.0))
    secs = rem * (180.0*3600.0)/pi_constant
    # Round up to the next arcmin/degree when the remainder is over half.
    if (accuracy=='arcmin' or accuracy=='deg') and secs>30: mins=mins+1
    if accuracy=='deg' and mins>30: deg=deg+1
    if accuracy=='deg': return '%i'%(sign*deg)+degsymb
    if accuracy=='arcmin': return '%i%s%i%s'%(sign*deg,degsymb,mins,minsymb)
    if accuracy=='arcsec': return '%i%s%i%s%2.0f%s'%(sign*deg,degsymb,mins,minsymb,secs,secsymb)
    # 'auto' (or any other value) falls back to whole degrees.
    return(getDecString(sign*radians,accuracy='deg'))
4397
def plot_corner(posterior,levels,parnames=None):
    """
    Make a corner plot using the corner module
    (See http://github.com/dfm/corner.py)
    @param posterior: The Posterior object
    @param levels: a list of confidence levels
    @param parnames: list of parameters to include (default: every
        parameter present in the posterior)
    """
    try:
        import corner
    except ImportError:
        try:
            import triangle as corner
        except ImportError:
            print('Cannot load corner module. Try running\n\t$ pip install corner')
            return None
    # BUGFIX: with the default parnames=None, filter() raised a TypeError;
    # fall back to plotting all parameters in the posterior.
    if parnames is None:
        parnames = posterior.names
    # Keep only parameters that actually exist in the posterior.
    parnames=list(filter(lambda x: x in posterior.names, parnames))
    labels = [plot_label(parname) for parname in parnames]
    data = np.hstack([posterior[p].samples for p in parnames])
    if posterior.injection:
        # Mark the injected values as "truths" on the plot.
        injvals=[posterior[p].injval for p in parnames]
        myfig=corner.corner(data,labels=labels,truths=injvals,quantiles=levels,plot_datapoints=False,bins=20)
    else:
        myfig=corner.corner(data,labels=labels,quantiles=levels,plot_datapoints=False,bins=20)
    return(myfig)
4423
4424
def plot_two_param_kde_greedy_levels(posteriors_by_name,plot2DkdeParams,levels,colors_by_name,line_styles=__default_line_styles,figsize=(4,3),dpi=250,figposition=[0.2,0.2,0.48,0.75],legend='right',hatches_by_name=None,Npixels=50):
    """
    Plots a 2D kernel density estimate of the 2-parameter marginal posterior.

    @param posteriors_by_name: dictionary of Posterior objects, indexed by name

    @param plot2DkdeParams: a dict {param1Name:Nparam1Bins,param2Name:Nparam2Bins}

    @param levels: list of credible levels

    @param colors_by_name: dict of colors, indexed by name

    @param line_styles: list of line styles for the credible levels

    @param figsize: figsize to pass to matplotlib

    @param dpi: dpi to pass to matplotlib

    @param figposition: figposition to pass to matplotlib

    @param legend: position of legend

    @param hatches_by_name: dict of hatch styles indexed by name
                            (NOTE(review): currently unused in the body)

    @param Npixels: Number of pixels in each axis of the 2D grid
    """

    from scipy import seterr as sp_seterr
    confidence_levels=levels

    # Reversed parameter order here for consistency with the other
    # plotting functions
    # NOTE(review): relies on plot2DkdeParams having exactly two keys and on
    # dict insertion order to decide which parameter goes on which axis.
    par2_name,par1_name=plot2DkdeParams.keys()
    xbin=plot2DkdeParams[par1_name]
    ybin=plot2DkdeParams[par2_name]
    levels= levels
    # KDE evaluation may underflow far from the bulk of samples; harmless here
    np.seterr(under='ignore')
    sp_seterr(under='ignore')

    fig=plt.figure(1,figsize=figsize,dpi=dpi)
    plt.clf()
    axes=fig.add_axes(figposition)
    name_list=[]

    #This fixes the precedence of line styles in the plot
    if len(line_styles)<len(levels):
        raise RuntimeError("Error: Need as many or more line styles to choose from as confidence levels to plot!")

    CSlst=[]
    for name,posterior in posteriors_by_name.items():
        print('Plotting '+name)
        name_list.append(name)
        par1_injvalue=posterior[par1_name].injval
        par2_injvalue=posterior[par2_name].injval

        par_trigvalues1=posterior[par1_name].trigvals
        par_trigvalues2=posterior[par2_name].trigvals
        xdat=posterior[par1_name].samples
        ydat=posterior[par2_name].samples
        a=np.squeeze(posterior[par1_name].samples)
        b=np.squeeze(posterior[par2_name].samples)
        offset=0.0
        # Shift time-like parameters down to a small range so tick labels
        # stay readable; the integer offset is recorded in the axis name.
        if par1_name.find('time')!=-1:
            offset=floor(min(a))
            a=a-offset
            if par1_injvalue:
                par1_injvalue=par1_injvalue-offset
            ax1_name=par1_name+' + %i'%(int(offset))
        else: ax1_name=par1_name

        if par2_name.find('time')!=-1:
            offset=floor(min(b))
            b=b-offset
            if par2_injvalue:
                par2_injvalue=par2_injvalue-offset
            ax2_name=par2_name+' + %i'%(int(offset))
        else: ax2_name=par2_name

        samp=np.transpose(np.column_stack((xdat,ydat)))

        # Gaussian KDE construction can fail (e.g. singular covariance);
        # in that case the whole plot is abandoned and None returned.
        try:
            kde=stats.kde.gaussian_kde(samp)
            den=kde(samp)
        except:
            return None

        #grid_coords = np.append(x.reshape(-1,1),y.reshape(-1,1),axis=1)
        Nx=Npixels
        Ny=Npixels
        # Ugly hack to produce plots centred on the main mode:
        # Choose 1%-99% percentile range of the samples
        # Then zoom out by 0.95 to encompass the contour edges
        xsorted=np.sort(xdat,axis=None)
        ysorted=np.sort(ydat,axis=None)
        imin=int(0.01*len(xsorted))
        imax=int(0.99*len(xsorted))
        xmax=xsorted[imax]
        xmin=xsorted[imin]
        ymax=ysorted[imax]
        ymin=ysorted[imin]
        xmid=0.5*(xmin+xmax)
        ymid=0.5*(ymin+ymax)
        xmin=xmid+(xmin-xmid)/0.95
        xmax=xmid+(xmax-xmid)/0.95
        ymin=ymid+(ymin-ymid)/0.95
        ymax=ymid+(ymax-ymid)/0.95
        xax=np.linspace(xmin,xmax,Nx)
        yax=np.linspace(ymin,ymax,Ny)
        #print 'Plot limits %f-%f x %f-%f'%(xmin,xmax,ymin,ymax)

        # Original way which includes all samples
        #xax = np.linspace(np.min(xdat),np.max(xdat),Nx)
        #yax = np.linspace(np.min(ydat),np.max(ydat),Ny)

        # Do the actual plotting
        x,y = np.meshgrid(xax,yax)
        grid_coords = np.vstack( (x.flatten(),y.flatten()) )
        z = kde(grid_coords)
        z = z.reshape(Nx,Ny)
        # Greedy credible levels: rank the KDE density at the sample points;
        # the density of the sample at each requested quantile becomes the
        # contour level that encloses that fraction of samples.
        densort=np.sort(den)[::-1]
        values=[]
        Npts=xdat.shape[0]
        zvalues=[]
        for level in levels:
            ilevel = int(Npts*level + 0.5)
            if ilevel >= Npts:
                ilevel = Npts-1
            zvalues.append(densort[ilevel])
        CS=plt.contour(x, y, z, zvalues,colors=[colors_by_name[name]],linestyles=line_styles )
        CSlst.append(CS)

        # Overplot the injected value (blue star) when known on both axes
        if par1_injvalue is not None and par2_injvalue is not None:
            plt.plot([par1_injvalue],[par2_injvalue],'b*',scalex=False,scaley=False,markersize=12)

        # Overplot per-IFO trigger values, colour-coded by detector
        if par_trigvalues1 is not None and par_trigvalues2 is not None:
            par1IFOs = set([IFO for IFO in par_trigvalues1.keys()])
            par2IFOs = set([IFO for IFO in par_trigvalues2.keys()])
            IFOs = par1IFOs.intersection(par2IFOs)
            for IFO in IFOs:
                if IFO=='H1': color = 'r'
                elif IFO=='L1': color = 'g'
                elif IFO=='V1': color = 'm'
                else: color = 'c'
                plt.plot([par_trigvalues1[IFO]],[par_trigvalues2[IFO]],color=color,marker='o',scalex=False,scaley=False)

    plt.xlabel(plot_label(par1_name))
    plt.ylabel(plot_label(par2_name))
    plt.grid()

    if len(name_list)!=len(CSlst):
        raise RuntimeError("Error number of contour objects does not equal number of names! Use only *one* contour from each set to associate a name.")

    # Legend: one entry per run name plus dummy black lines that label which
    # line style corresponds to which credible level.
    full_name_list=[]
    dummy_lines=[]
    for plot_name in name_list:
        full_name_list.append(plot_name)
    if len(confidence_levels)>1:
        for ls_,cl in zip(line_styles[0:len(confidence_levels)],confidence_levels):
            dummy_lines.append(mpl_lines.Line2D(np.array([0.,1.]),np.array([0.,1.]),ls=ls_,color='k'))
            full_name_list.append('%s%%'%str(int(cl*100)))

    fig_actor_lst = [cs.collections[0] for cs in CSlst]
    fig_actor_lst.extend(dummy_lines)
    if legend is not None:
        # framealpha is unsupported on very old matplotlib; retry without it
        try:
            twodcontour_legend=plt.figlegend(tuple(fig_actor_lst), tuple(full_name_list), loc='right',framealpha=0.1)
        except:
            twodcontour_legend=plt.figlegend(tuple(fig_actor_lst), tuple(full_name_list), loc='right')
        for text in twodcontour_legend.get_texts():
            text.set_fontsize('small')

    # Choose a sensible number of ticks based on how wide tick labels are
    majorFormatterX=ScalarFormatter(useMathText=True)
    majorFormatterX.format_data=lambda data:'%.4g'%(data)
    majorFormatterY=ScalarFormatter(useMathText=True)
    majorFormatterY.format_data=lambda data:'%.4g'%(data)
    majorFormatterX.set_scientific(True)
    majorFormatterY.set_scientific(True)
    axes.xaxis.set_major_formatter(majorFormatterX)
    axes.yaxis.set_major_formatter(majorFormatterY)
    Nchars=max(map(lambda d:len(majorFormatterX.format_data(d)),axes.get_xticks()))
    if Nchars>8:
        Nticks=3
    elif Nchars>5:
        Nticks=4
    elif Nchars>4:
        Nticks=5
    else:
        Nticks=6
    locatorX=matplotlib.ticker.MaxNLocator(nbins=Nticks-1)
    # Sky-position axes get dedicated RA/Dec locators and formatters
    if par1_name=='rightascension' or par1_name=='ra':
        (ramin,ramax)=plt.xlim()
        locatorX=RALocator(min=ramin,max=ramax)
        majorFormatterX=RAFormatter()
    if par1_name=='declination' or par1_name=='dec':
        (decmin,decmax)=plt.xlim()
        locatorX=DecLocator(min=decmin,max=decmax)
        majorFormatterX=DecFormatter()
    axes.xaxis.set_major_formatter(majorFormatterX)
    if par2_name=='rightascension' or par2_name=='ra':
        (ramin,ramax)=plt.ylim()
        locatorY=RALocator(ramin,ramax)
        axes.yaxis.set_major_locator(locatorY)
        majorFormatterY=RAFormatter()
    if par2_name=='declination' or par2_name=='dec':
        (decmin,decmax)=plt.ylim()
        locatorY=DecLocator(min=decmin,max=decmax)
        majorFormatterY=DecFormatter()
        axes.yaxis.set_major_locator(locatorY)

    axes.yaxis.set_major_formatter(majorFormatterY)
    #locatorX.view_limits(bins[0],bins[-1])
    axes.xaxis.set_major_locator(locatorX)


    # RA axes are conventionally plotted decreasing to the right
    if(par1_name.lower()=='ra' or par1_name.lower()=='rightascension'):
        xmin,xmax=plt.xlim()
        if(xmin<0.0): xmin=0.0
        if(xmax>2.0*pi_constant): xmax=2.0*pi_constant
        plt.xlim(xmax,xmin)

    return fig
4646
def plot_two_param_kde(posterior,plot2DkdeParams):
    """
    Plots a 2D kernel density estimate of the 2-parameter marginal posterior.

    @param posterior: an instance of the Posterior class.

    @param plot2DkdeParams: a dict {param1Name:Nparam1Bins,param2Name:Nparam2Bins}
    """

    from scipy import seterr as sp_seterr

    par1_name,par2_name=plot2DkdeParams.keys()
    Nx=plot2DkdeParams[par1_name]
    Ny=plot2DkdeParams[par2_name]

    xdat=posterior[par1_name].samples
    ydat=posterior[par2_name].samples

    par_injvalue1=posterior[par1_name].injval
    par_injvalue2=posterior[par2_name].injval

    par_trigvalues1=posterior[par1_name].trigvals
    par_trigvalues2=posterior[par2_name].trigvals

    # KDE evaluation may underflow far from the bulk of samples; harmless here
    np.seterr(under='ignore')
    sp_seterr(under='ignore')

    myfig=plt.figure(1,figsize=(6,4),dpi=200)
    myfig.add_axes(plt.Axes(myfig,[0.20,0.20,0.75,0.7]))
    plt.clf()

    xax=np.linspace(min(xdat),max(xdat),Nx)
    yax=np.linspace(min(ydat),max(ydat),Ny)
    x,y=np.meshgrid(xax,yax)

    samp=np.transpose(np.column_stack((xdat,ydat)))

    # stats.gaussian_kde: the legacy stats.kde submodule spelling has been
    # removed from modern SciPy releases.
    kde=stats.gaussian_kde(samp)

    grid_coords = np.append(x.reshape(-1,1),y.reshape(-1,1),axis=1)

    z = kde(grid_coords.T)
    # meshgrid returns (Ny,Nx)-shaped arrays, so the flattened KDE values
    # must be reshaped to (Ny,Nx); the old (Nx,Ny) reshape scrambled the
    # map whenever Nx != Ny.
    z = z.reshape(Ny,Nx)


    # np.ptp(): the ndarray.ptp() method was removed in NumPy 2.0
    asp=np.ptp(xax)/np.ptp(yax)
#    if(asp<0.8 or asp > 1.6): asp=1.4
    plt.imshow(z,extent=(xax[0],xax[-1],yax[0],yax[-1]),aspect=asp,origin='lower')
    plt.colorbar()

    # Overplot the injected value when known for both parameters
    if par_injvalue1 is not None and par_injvalue2 is not None:
        plt.plot([par_injvalue1],[par_injvalue2],'bo',scalex=False,scaley=False)

    # Overplot per-IFO trigger values, colour-coded by detector
    if par_trigvalues1 is not None and par_trigvalues2 is not None:
        par1IFOs = set([IFO for IFO in par_trigvalues1.keys()])
        par2IFOs = set([IFO for IFO in par_trigvalues2.keys()])
        IFOs = par1IFOs.intersection(par2IFOs)
        for IFO in IFOs:
            if IFO=='H1': color = 'r'
            elif IFO=='L1': color = 'g'
            elif IFO=='V1': color = 'm'
            else: color = 'c'
            plt.plot([par_trigvalues1[IFO]],[par_trigvalues2[IFO]],color=color,marker='o',scalex=False,scaley=False)

    plt.xlabel(plot_label(par1_name))
    plt.ylabel(plot_label(par2_name))
    plt.grid()

    # For RA and dec set custom labels and for RA reverse
    if(par1_name.lower()=='ra' or par1_name.lower()=='rightascension'):
        xmin,xmax=plt.xlim()
        plt.xlim(xmax,xmin)


    return myfig
4722
def get_inj_by_time(injections,time):
    """
    Filter injections to find the injection with end time given by time +/- 0.1s

    @param injections: iterable of injection rows
    @param time: target end time (seconds)
    Raises StopIteration if no injection matches.
    """
    # itertools.ifilter and .next() are Python 2 only; the builtin filter()
    # already returns a lazy iterator on Python 3, consumed with next().
    return next(filter(lambda a: abs(float(get_end(a)) - time) < 0.1, injections))
4730
def histogram2D(posterior,greedy2Params,confidence_levels):
    """
    Returns a 2D histogram and edges for the two parameters passed in
    greedy2Params, plus the actual discrete confidence levels imposed by the
    finite number of samples.

    H,xedges,yedges,Hlasts = histogram2D(posterior,greedy2Params,confidence_levels)

    @param posterior: Posterior instance
    @param greedy2Params: a dict ;{param1Name:param1binSize,param2Name:param2binSize}
    @param confidence_levels: a list of the required confidence levels to plot on the contour map.
    """

    # NOTE: relies on greedy2Params having exactly two keys, unpacked in
    # dict insertion order.
    par1_name,par2_name=greedy2Params.keys()
    par1_bin=greedy2Params[par1_name]
    par2_bin=greedy2Params[par2_name]
    a=np.squeeze(posterior[par1_name].samples)
    b=np.squeeze(posterior[par2_name].samples)
    par1pos_min=a.min()
    par2pos_min=b.min()
    par1pos_max=a.max()
    par2pos_max=b.max()
    # Bin counts chosen so every sample falls inside the grid
    par1pos_Nbins= int(ceil((par1pos_max - par1pos_min)/par1_bin))+1
    par2pos_Nbins= int(ceil((par2pos_max - par2pos_min)/par2_bin))+1
    H, xedges, yedges = np.histogram2d(a,b, bins=(par1pos_Nbins, par2pos_Nbins),density=True)
    temp=np.copy(H)
    temp=temp.ravel()
    Hsum=0
    Hlasts=[]
    idxes=np.argsort(temp)
    j=len(idxes)-1
    # Greedy accumulation: consume bins from densest to sparsest until each
    # requested fraction of the total density is enclosed; the density of
    # the last bin consumed is the discrete level actually achieved.
    # Iterate an ascending *copy* of the levels: the original sorted the
    # caller's list in place, a surprising side effect for callers.
    for cl in sorted(confidence_levels):
        while float(Hsum/np.sum(H))<cl:
            max_i=idxes[j]
            j-=1
            val = temp[max_i]
            Hlast=val
            Hsum+=val
            temp[max_i]=0
        Hlasts.append(Hlast)
    return (H,xedges,yedges,Hlasts)
4773
def plot_two_param_greedy_bins_contourf(posteriors_by_name,greedy2Params,confidence_levels,colors_by_name,figsize=(7,6),dpi=120,figposition=[0.3,0.3,0.5,0.5],legend='right',hatches_by_name=None):
    """
    Filled 2D confidence contours from greedy binning, one contour set per
    named posterior.

    @param posteriors_by_name A dictionary of posterior objects indexed by name
    @param greedy2Params: a dict ;{param1Name:param1binSize,param2Name:param2binSize}
    @param confidence_levels: a list of the required confidence levels to plot on the contour map.
    @param colors_by_name: dict of colors, indexed by name
    @param figsize: figsize to pass to matplotlib
    @param dpi: dpi to pass to matplotlib
    @param figposition: figposition to pass to matplotlib
    @param legend: Legend position to pass to matplotlib
    @param hatches_by_name: dict of hatch styles, indexed by name
                            (NOTE(review): currently unused in the body)
    """
    fig=plt.figure(1,figsize=figsize,dpi=dpi)
    plt.clf()
    fig.add_axes(figposition)
    CSlst=[]
    name_list=[]
    # NOTE(review): relies on greedy2Params having exactly two keys,
    # unpacked in dict insertion order.
    par1_name,par2_name=greedy2Params.keys()
    for name,posterior in posteriors_by_name.items():
        name_list.append(name)
        # The appended ~1.0 level closes the outermost filled region
        H,xedges,yedges,Hlasts=histogram2D(posterior,greedy2Params,confidence_levels+[0.99999999999999])
        # NOTE(review): extent is computed but never used below
        extent= [xedges[0], yedges[-1], xedges[-1], xedges[0]]
        # Translucent filled contours plus solid outlines; note the axes are
        # swapped (par2 on x, par1 on y), matching the labels below.
        CS2=plt.contourf(yedges[:-1],xedges[:-1],H,Hlasts,extend='max',colors=[colors_by_name[name]] ,alpha=0.3 )
        CS=plt.contour(yedges[:-1],xedges[:-1],H,Hlasts,extend='max',colors=[colors_by_name[name]] )
        CSlst.append(CS)

    plt.title("%s-%s confidence contours (greedy binning)"%(par1_name,par2_name)) # add a title
    plt.xlabel(plot_label(par2_name))
    plt.ylabel(plot_label(par1_name))
    if len(name_list)!=len(CSlst):
        raise RuntimeError("Error number of contour objects does not equal number of names! Use only *one* contour from each set to associate a name.")
    # Legend: one entry per run plus dummy black lines labelling the levels
    full_name_list=[]
    dummy_lines=[]
    for plot_name in name_list:
        full_name_list.append(plot_name)
    if len(confidence_levels)>1:
        for cl in confidence_levels+[1]:
            dummy_lines.append(mpl_lines.Line2D(np.array([0.,1.]),np.array([0.,1.]),color='k'))
            full_name_list.append('%s%%'%str(int(cl*100)))
    fig_actor_lst = [cs.collections[0] for cs in CSlst]
    fig_actor_lst.extend(dummy_lines)
    if legend is not None:
        # framealpha is unsupported on very old matplotlib; retry without it
        try:
            twodcontour_legend=plt.figlegend(tuple(fig_actor_lst), tuple(full_name_list), loc='right',framealpha=0.1)
        except:
            twodcontour_legend=plt.figlegend(tuple(fig_actor_lst), tuple(full_name_list), loc='right')
        for text in twodcontour_legend.get_texts():
            text.set_fontsize('small')

    return fig
4824
4825
4826
def plot_two_param_greedy_bins_hist(posterior,greedy2Params,confidence_levels):
    """
    Histograms of the ranked pixels produced by the 2-parameter greedy
    binning algorithm colured by their confidence level.

    @param posterior: an instance of the Posterior class.

    @param greedy2Params: a dict ;{param1Name:param1binSize,param2Name:param2binSize}

    @param confidence_levels: list of confidence levels to show
                              (NOTE(review): currently unused in the body)
    """

    from scipy import seterr as sp_seterr

    np.seterr(under='ignore')
    sp_seterr(under='ignore')

    #Extract parameter names
    par1_name,par2_name=greedy2Params.keys()
    #Extract bin sizes
    par1_bin=greedy2Params[par1_name]
    par2_bin=greedy2Params[par2_name]

    a=np.squeeze(posterior[par1_name].samples)
    b=np.squeeze(posterior[par2_name].samples)

    #Extract injection information
    par1_injvalue=posterior[par1_name.lower()].injval
    par2_injvalue=posterior[par2_name.lower()].injval

    #Create 2D bin array
    par1pos_min=a.min()
    par2pos_min=b.min()

    par1pos_max=a.max()
    par2pos_max=b.max()

    # NOTE(review): these bin counts are computed but the histogram below
    # uses a fixed 50x50 grid instead.
    par1pos_Nbins= int(ceil((par1pos_max - par1pos_min)/par1_bin))+1
    par2pos_Nbins= int(ceil((par2pos_max - par2pos_min)/par2_bin))+1

    # Adjust for time parameter: shift to a small offset so tick labels stay
    # readable; the integer offset is recorded in the axis name.
    if par1_name.find('time')!=-1:
        offset=floor(min(a))
        a=a-offset
        if par1_injvalue:
            par1_injvalue=par1_injvalue-offset
        ax1_name=par1_name+' + %i'%(int(offset))
    else: ax1_name=par1_name

    if par2_name.find('time')!=-1:
        offset=floor(min(b))
        b=b-offset
        if par2_injvalue:
            par2_injvalue=par2_injvalue-offset
        ax2_name=par2_name+' + %i'%(int(offset))
    else: ax2_name=par2_name


    #Extract trigger information
    par1_trigvalues=posterior[par1_name.lower()].trigvals
    par2_trigvalues=posterior[par2_name.lower()].trigvals

    myfig=plt.figure()
    axes=plt.Axes(myfig,[0.3,0.3,0.95-0.3,0.90-0.3])
    myfig.add_axes(axes)

    #plt.clf()
    plt.xlabel(plot_label(ax2_name))
    plt.ylabel(plot_label(ax1_name))

    #bins=(par1pos_Nbins,par2pos_Nbins)
    bins=(50,50) # Matches plot_one_param_pdf

    majorFormatterX=ScalarFormatter(useMathText=True)
    majorFormatterX.format_data=lambda data:'%.4g'%(data)
    majorFormatterY=ScalarFormatter(useMathText=True)
    majorFormatterY.format_data=lambda data:'%.4g'%(data)
    majorFormatterX.set_scientific(True)
    majorFormatterY.set_scientific(True)
    axes.xaxis.set_major_formatter(majorFormatterX)
    axes.yaxis.set_major_formatter(majorFormatterY)
    H, xedges, yedges = np.histogram2d(a,b, bins,density=False)


    #Replace H with greedy bin confidence levels at each pixel...
    temp=np.copy(H)
    temp=temp.flatten()

    Hsum=0
    Hsum_actual=np.sum(H)

    # Walk the pixels from densest to sparsest, overwriting each visited
    # pixel with (1 - enclosed fraction); dense pixels then render dark
    # under the reversed gray colormap.
    idxes=np.argsort(temp)
    j=len(idxes)-1
    while Hsum<Hsum_actual:
        #ind = np.argsort(temp)
        max_i=idxes[j]
        j-=1
        val = temp[max_i]
        Hsum+=int(val)
        temp[max_i]=0

        #print Hsum,Hsum_actual
        H.flat[max_i]=1-float(Hsum)/float(Hsum_actual)

    extent = [yedges[0], yedges[-1], xedges[0], xedges[-1]]
    plt.imshow(np.flipud(H), axes=axes, aspect='auto', extent=extent, interpolation='nearest',cmap='gray_r')
    plt.gca().autoscale_view()
    plt.colorbar()

    #plt.hexbin(a,b,cmap='gray_r',axes=axes )

    # Choose a sensible number of ticks based on how wide tick labels are
    Nchars=max(map(lambda d:len(majorFormatterX.format_data(d)),axes.get_xticks()))
    if Nchars>8:
        Nticks=3
    elif Nchars>5:
        Nticks=4
    elif Nchars>4:
        Nticks=5
    else:
        Nticks=6
    locatorX=matplotlib.ticker.MaxNLocator(nbins=Nticks-1)
    (xmin,xmax)=plt.xlim()
    (ymin,ymax)=plt.ylim()
    # Sky-position axes get dedicated RA/Dec locators and formatters
    if par2_name=='rightascension' or par2_name=='ra':
        locatorX=RALocator(min=xmin,max=xmax)
        majorFormatterX=RAFormatter()
    if par2_name=='declination' or par2_name=='dec':
        locatorX=DecLocator(min=xmin,max=xmax)
        majorFormatterX=DecFormatter()
    if par1_name=='rightascension' or par1_name=='ra':
        locatorY=RALocator(min=ymin,max=ymax)
        axes.yaxis.set_major_locator(locatorY)
        majorFormatterY=RAFormatter()
    if par1_name=='declination' or par1_name=='dec':
        locatorY=DecLocator(min=ymin,max=ymax)
        axes.yaxis.set_major_locator(locatorY)
        majorFormatterY=DecFormatter()

    axes.xaxis.set_major_formatter(majorFormatterX)
    axes.yaxis.set_major_formatter(majorFormatterY)
    #locatorX.view_limits(bins[0],bins[-1])
    axes.xaxis.set_major_locator(locatorX)

    # Overplot the injected value when known for both parameters
    if par1_injvalue is not None and par2_injvalue is not None:
        plt.plot([par2_injvalue],[par1_injvalue],'bo',scalex=False,scaley=False)



    # RA axes are conventionally plotted decreasing to the right
    if(par2_name.lower()=='ra' or par2_name.lower()=='rightascension'):
        xmin,xmax=plt.xlim()
        if(xmin)<0.0: xmin=0.0
        if(xmax>2.0*pi_constant): xmax=2.0*pi_constant
        plt.xlim(xmax,xmin)


    return myfig
4983
def greedy_bin_one_param(posterior,greedy1Param,confidence_levels):
    """
    Determine the 1-parameter Bayesian Confidence Interval using a greedy
    binning algorithm.

    @param posterior: an instance of the posterior class.

    @param greedy1Param: a dict; {paramName:paramBinSize}.

    @param confidence_levels: A list of floats of the required confidence intervals [(0-1)].
        NOTE(review): sorted in place as a side effect.

    Returns (toppoints,injectionconfidence,reses,injection_area,cl_intervals).
    """

    paramName=list(greedy1Param.keys())[0]
    par_bin=list(greedy1Param.values())[0]
    par_samps=posterior[paramName.lower()].samples

    # samples arrive as an (N,1) column; [0] extracts the scalar value
    parpos_min=min(par_samps)[0]
    parpos_max=max(par_samps)[0]

    par_point=parpos_min

    parpos_Nbins= int(ceil((parpos_max - parpos_min)/par_bin))+1

    greedyPoints=np.zeros((parpos_Nbins,2))
    # ...NB 2D so it can be put through same confidence level function
    greedyHist=np.zeros(parpos_Nbins,dtype='i8')

    #Bin up
    for i in range(parpos_Nbins):
        greedyPoints[i,0]=par_point
        greedyPoints[i,1]=par_point
        par_point+=par_bin

    for par_samp in par_samps:
        par_samp=par_samp[0]
        par_binNumber=int(floor((par_samp-parpos_min)/par_bin))
        try:
            greedyHist[par_binNumber]+=1
        except IndexError:
            # Out-of-range samples are reported but not counted
            print("IndexError: bin number: %i total bins: %i parsamp: %f "\
                %(par_binNumber,parpos_Nbins,par_samp))

    #Find injection bin
    injbin=None
    par_injvalue=posterior[paramName].injval
    if par_injvalue:
        par_binNumber=floor((par_injvalue-parpos_min)/par_bin)
        injbin=par_binNumber

    toppoints,injectionconfidence,reses,injection_area=_greedy_bin(greedyHist,greedyPoints,injbin,float(par_bin),int(len(par_samps)),confidence_levels)
    cl_intervals=[]
    confidence_levels.sort()
    for cl in confidence_levels:
        # toppoints[:,-1] holds the cumulative confidence of each ranked bin;
        # the bins below cl form the credible region at that level
        ind=np.nonzero(toppoints[:,-1]<cl)

        if len(ind[0]) > 1:
            cl_intervals.append((np.min(toppoints[ind,0]),np.max(toppoints[ind,0])))

        else:
            # Degenerate region: a single bin gives a zero-width interval
            cl_intervals.append((toppoints[ind[0],0],toppoints[ind[0],0]))

    return toppoints,injectionconfidence,reses,injection_area,cl_intervals
5047
5048#
def contigious_interval_one_param(posterior,contInt1Params,confidence_levels):
    """
    Calculates the smallest contigious 1-parameter confidence interval for a
    set of given confidence levels.

    @param posterior: an instance of the Posterior class.

    @param contInt1Params: a dict {paramName:paramBinSize}.

    @param confidence_levels: Required confidence intervals.

    Returns (oneDContCL, oneDContInj): interval edges/width per level, and
    the interval size/confidence at which the injection is first enclosed.
    """
    oneDContCL={}
    oneDContInj={}

    paramName=list(contInt1Params.keys())[0]
    par_bin=list(contInt1Params.values())[0]

    par_injvalue=posterior[paramName].injval

    par_samps=posterior[paramName].samples

    parpos_min=min(par_samps)
    parpos_max=max(par_samps)

    par_point=parpos_min
    parpos_Nbins= int(ceil((parpos_max - parpos_min)/par_bin))+1

    greedyHist=np.zeros(parpos_Nbins,dtype='i8')

    # Histogram the samples onto a fixed-width grid
    for par_samp in par_samps:
        par_binNumber=int(floor((par_samp-parpos_min)/par_bin))
        try:
            greedyHist[par_binNumber]+=1
        except IndexError:
            # Out-of-range samples are reported but not counted
            print("IndexError: bin number: %i total bins: %i parsamp: %f bin: %i"\
                %(
                par_binNumber,
                parpos_Nbins,
                par_samp,
                par_binNumber
                ))

    injbin=None
    #Find injection bin
    if par_injvalue:
        par_binNumber=floor((par_injvalue-parpos_min)/par_bin)
        injbin=par_binNumber

    j=0
    #print "Calculating contigious confidence intervals for %s..."%par_name
    len_par_samps=len(par_samps)

    injinterval=None

    #Determine smallest contigious interval for given confidence levels (brute force)
    while j < len(confidence_levels):
        confidence_level=confidence_levels[j]
        #Loop over size of interval
        max_left=0
        max_right=0

        # i is the current window width in bins; grow it until the best
        # window of that width encloses the requested fraction of samples
        for i in range(len(greedyHist)):

            max_frac=None
            left=0
            right=i

            #Slide interval
            while right<len(greedyHist):
                Npoints=sum(greedyHist[left:right])
                frac=float(Npoints)/float(len_par_samps)
                #print "left %i , right %i , frac %f"%(left,right,frac)

                # Track the best-covering window of the current width
                if max_frac is None:
                    max_frac=frac
                    max_left=left
                    max_right=right
                else:
                    if frac>max_frac:
                        max_frac=frac
                        max_left=left
                        max_right=right

                left+=1
                right+=1

            # Record the interval size when the injection first lands in the
            # best window of the current width
            if injbin is not None and injinterval is None:
                if injbin in range(max_left,max_right):
                    injinterval=(max_right-max_left)*par_bin
                    oneDContInj['interval']=injinterval
                    # NOTE(review): 'frac' here is the fraction of the *last*
                    # window slid, not of the best window (max_frac) — verify
                    # this is intended before relying on the value.
                    oneDContInj['confidence']=1-frac
            if max_frac > confidence_level:
                break

            max_frac=None

        if max_frac is None:
            print("Cant determine intervals at %f confidence!"%confidence_level)
        else:

            oneDContCL['left']=max_left*par_bin
            oneDContCL['right']=max_right*par_bin
            oneDContCL['width']=(max_right-max_left)*par_bin
            # Skip any further confidence levels already satisfied by this
            # interval; note the dicts are overwritten per level, so only
            # the last processed level's interval survives in the output.
            k=j
            while k+1<len(confidence_levels) :
                if confidence_levels[k+1]<max_frac:
                    j+=1
                k+=1
        j+=1

    return oneDContCL,oneDContInj
5161
5162
class ACLError(Exception):
    """Raised when a self-consistent autocorrelation length cannot be
    estimated (series too short relative to its true ACL).

    The explicit __init__ that merely forwarded *args to Exception was
    redundant and has been removed; behavior is unchanged.
    """
5166
5167
def autocorrelation(series):
    """Returns an estimate of the autocorrelation function of a given
    series. Returns only the positive-lag portion of the ACF,
    normalized so that the zero-th element is 1."""
    # Remove the mean so we correlate fluctuations, not the raw signal
    centered = series - np.mean(series)
    # Correlating against the conjugated, time-reversed series via
    # convolution yields the (unnormalized) autocorrelation at all lags
    flipped = np.conj(centered[::-1])
    full_corr = np.fft.ifftshift(signal.fftconvolve(flipped, centered, mode='full'))

    # Keep only lags 0..N-1 and normalize by the zero-lag value
    npts = series.shape[0]
    positive_lags = full_corr[0:npts]
    return positive_lags / positive_lags[0]
5182
5183
def autocorrelation_length_estimate(series, acf=None, M=5, K=2):
    """Attempts to find a self-consistent estimate of the
    autocorrelation length of a given series.

    If C(tau) is the autocorrelation function (normalized so C(0) = 1,
    for example from the autocorrelation procedure in this module),
    then the autocorrelation length is the smallest s such that

    1 + 2*C(1) + 2*C(2) + ... + 2*C(M*s) < s

    In words: the autocorrelation length is the shortest length so
    that the sum of the autocorrelation function is smaller than that
    length over a window of M times that length.

    The maximum window length is restricted to be len(series)/K as a
    safety precaution against relying on data near the extreme of the
    lags in the ACF, where there is a lot of noise. Note that this
    implies that the series must be at least M*K*s samples long in
    order to get a reliable estimate of the ACL.

    If no such s can be found, raises ACLError; in this case it is
    likely that the series is too short relative to its true
    autocorrelation length to obtain a consistent ACL estimate."""

    if acf is None:
        acf=autocorrelation(series)

    # Apply the factor of 2 on the positive lags to a *copy*: the previous
    # in-place `acf[1:] *= 2.0` corrupted the caller's array, so repeated
    # calls with the same acf gave different (growing) answers.
    weighted = np.array(acf, copy=True)
    weighted[1:] *= 2.0

    imax=int(weighted.shape[0]/K)

    # Cumulative sum and ACL length associated with each window
    cacf=np.cumsum(weighted)
    s=np.arange(1, cacf.shape[0]+1)/float(M)

    # Find all places where cumulative sum over window is smaller than
    # associated ACL.
    estimates=np.flatnonzero(cacf[:imax] < s[:imax])

    if estimates.shape[0] > 0:
        # Return the first index where cumulative sum is smaller than
        # ACL associated with that index's window
        return s[estimates[0]]
    else:
        # Cannot find self-consistent ACL estimate.
        raise ACLError('autocorrelation length too short for consistent estimate')
5229
5230
def effectiveSampleSize(samples, Nskip=1):
    """
    Compute the effective sample size, calculating the ACL using only
    the second half of the samples to avoid ACL overestimation due to
    chains equilibrating after adaptation.

    Returns (Neffective, acl, acf) where acl is scaled by Nskip.
    """
    total = len(samples)
    # Only the second half of the chain enters the ACL estimate
    second_half = samples[int(total / 2):]
    acf = autocorrelation(second_half)
    try:
        acl = autocorrelation_length_estimate(second_half, acf=acf)
    except ACLError:
        # No consistent estimate: treat the whole chain as one correlated block
        acl = total
    n_effective = floor(total / acl)
    acl *= Nskip
    return (n_effective, acl, acf)
5246
5247
def readCoincXML(xml_file, trignum):
    """
    Read a coinc XML file and return the single-inspiral trigger rows
    belonging to coincidence number ``trignum``.

    Raises RuntimeError when trignum exceeds the number of coincidences.
    """
    from igwn_ligolw import lsctables
    from igwn_ligolw import utils

    xmldoc = utils.load_filename(xml_file)
    coinc = lsctables.CoincTable.get_table(xmldoc)
    coincMap = lsctables.CoincMapTable.get_table(xmldoc)
    snglInsps = lsctables.SnglInspiralTable.get_table(xmldoc)

    if (trignum > len(coinc)):
        raise RuntimeError("Error: You asked for trigger %d, but %s contains only %d triggers" %(trignum,xml_file,len(coinc)))

    # Map the requested coincidence to its constituent single-IFO events
    coincEventID = coinc.getColumnByName('coinc_event_id')[trignum]
    wanted_ids = [row.event_id for row in coincMap if row.coinc_event_id == coincEventID]
    return [row for row in snglInsps if row.event_id in wanted_ids]
5265
5266#===============================================================================
5267# Parameter estimation codes results parser
5268#===============================================================================
5269
def find_ndownsample(samples, nDownsample):
    """
    Given a list of files, threshold value, and a desired
    number of outputs posterior samples, return the skip number to
    achieve the desired number of posterior samples.

    @param samples: astropy-table-like object with .colnames and column access
    @param nDownsample: target number of output samples, or None to derive
        the skip from the longest parameter autocorrelation length.
    Returns an int skip, or np.nan when the chain holds no uncorrelated samples.
    """
    if nDownsample is None:
        # No target count: base the skip on the worst (longest) per-parameter
        # autocorrelation length instead.
        print("Max ACL(s):")
        # Calibration-spline columns are excluded from the ACL estimate
        splineParams=["spcal_npts", "spcal_active","constantcal_active"]
        for i in np.arange(25):
            for k in lal.cached_detector_by_prefix:
                splineParams.append(k.lower()+'_spcal_freq_'+str(i))
                splineParams.append(k.lower()+'_spcal_logfreq_'+str(i))

        # Bookkeeping columns that are not sampling parameters
        nonParams = ["logpost", "post", "cycle", "timestamp", "snrh1", "snrl1", "snrv1",
                     "margtime","margtimephi","margtime","time_max","time_min",
                     "time_mean", "time_maxl","sky_frame","psdscaleflag","logdeltaf","flow","f_ref",
                     "lal_amporder","lal_pnorder","lal_approximant","tideo","spino","signalmodelflag",
                     "temperature","nifo","nlocaltemps","ntemps","randomseed","samplerate","segmentlength","segmentstart",
                     "t0", "phase_maxl", "azimuth", "cosalpha", "lal_amporder", "bluni"] + logParams + snrParams + splineParams
        # Constant columns carry no autocorrelation information either
        fixedParams = [p for p in samples.colnames if all(x==samples[p][0] for x in samples[p])]
        print("Fixed parameters: "+str(fixedParams))
        nonParams.extend(fixedParams)
        params = [p for p in samples.colnames if p.lower() not in nonParams]
        # Spacing between recorded MCMC cycles, used to report the ACL in cycles
        stride=np.diff(samples['cycle'])[0]
        # effectiveSampleSize returns (Neff, ACL, acf); keep the first two
        results = np.array([np.array(effectiveSampleSize(samples[param])[:2]) for param in params])
        nEffs = results[:,0]
        nEffective = min(nEffs)
        ACLs = results[:,1]
        maxACLind = np.argmax(ACLs)
        maxACL = ACLs[maxACLind]
        # Get index in header, which includes "non-params"
        print("%i (%s)." %(stride*maxACL,params[maxACLind]))

    nskip = 1
    if nDownsample is not None:
        # Explicit target: simple uniform thinning down to nDownsample
        if len(samples) > nDownsample:
            nskip *= floor(len(samples)/nDownsample)
        nskip = int(nskip)
    else:
        # ACL-based thinning: keep roughly one sample per autocorrelation length
        nEff = nEffective
        if nEff > 1:
            if len(samples) > nEff:
                nskip = int(ceil(len(samples)/nEff))
        else:
            # Chain shorter than one ACL: no uncorrelated samples available
            nskip = np.nan
    return nskip
5317
5318class PEOutputParser(object):
5319 """
5320 A parser for the output of Bayesian parameter estimation codes.
5321
5322 TODO: Will be abstract class when LDG moves over to Python >2.6,
5323 inherited by each method .
5324 """
5325 def __init__(self,inputtype):
5326 if inputtype == 'ns':
5327 self._parser=self._ns_to_pos
5328 elif inputtype == 'common':
5329 self._parser=self._common_to_pos
5330 elif inputtype == 'fm':
5332 elif inputtype == "inf_mcmc":
5333 self._parser=self._infmcmc_to_pos
5334 elif inputtype == "xml":
5335 self._parser=self._xml_to_pos
5336 elif inputtype == 'hdf5':
5337 self._parser = self._hdf5_to_pos
5338 elif inputtype == 'hdf5s':
5339 self._parser = self._hdf5s_to_pos
5340 else:
5341 raise ValueError('Invalid value for "inputtype": %r' % inputtype)
5342
5343 def parse(self,files,**kwargs):
5344 """
5345 Parse files.
5346 """
5347 return self._parser(files,**kwargs)
5348
5349 def _infmcmc_to_pos(self,files,outdir=None,deltaLogP=None,fixedBurnins=None,nDownsample=None,oldMassConvention=False,**kwargs):
5350 """
5351 Parser for lalinference_mcmcmpi output.
5352 """
5353 if not (fixedBurnins is None):
5354 if not (deltaLogP is None):
5355 print("Warning: using deltaLogP criteria in addition to fixed burnin")
5356 if len(fixedBurnins) == 1 and len(files) > 1:
5357 print("Only one fixedBurnin criteria given for more than one output. Applying this to all outputs.")
5358 fixedBurnins = np.ones(len(files),'int')*fixedBurnins[0]
5359 elif len(fixedBurnins) != len(files):
5360 raise RuntimeError("Inconsistent number of fixed burnin criteria and output files specified.")
5361 print("Fixed burning criteria: ",fixedBurnins)
5362 else:
5363 fixedBurnins = np.zeros(len(files))
5364 logPThreshold=-np.inf
5365 if not (deltaLogP is None):
5366 logPThreshold= - deltaLogP
5367 print("Eliminating any samples before log(Post) = ", logPThreshold)
5368 nskips=self._find_ndownsample(files, logPThreshold, fixedBurnins, nDownsample)
5369 if nDownsample is None:
5370 print("Downsampling to take only uncorrelated posterior samples from each file.")
5371 if len(nskips) == 1 and np.isnan(nskips[0]):
5372 print("WARNING: All samples in chain are correlated. Downsampling to 10000 samples for inspection!!!")
5373 nskips=self._find_ndownsample(files, logPThreshold, fixedBurnins, 10000)
5374 else:
5375 for i in range(len(nskips)):
5376 if np.isnan(nskips[i]):
5377 print("%s eliminated since all samples are correlated.")
5378 else:
5379 print("Downsampling by a factor of ", nskips[0], " to achieve approximately ", nDownsample, " posterior samples")
5380 if outdir is None:
5381 outdir=''
5382 runfileName=os.path.join(outdir,"lalinfmcmc_headers.dat")
5383 postName="posterior_samples.dat"
5384 runfile=open(runfileName, 'w')
5385 outfile=open(postName, 'w')
5386 try:
5387 self._infmcmc_output_posterior_samples(files, runfile, outfile, logPThreshold, fixedBurnins, nskips, oldMassConvention)
5388 finally:
5389 runfile.close()
5390 outfile.close()
5391 return self._common_to_pos(open(postName,'r'))
5392
5393
    def _infmcmc_output_posterior_samples(self, files, runfile, outfile, logPThreshold, fixedBurnins, nskips=None, oldMassConvention=False):
        """
        Concatenate all the samples from the given files into outfile.
        For each file, only those samples past the point where the
        log(post) > logPThreshold are concatenated after eliminating
        fixedBurnin.

        @param files: list of chain files; chain index (1-based) is appended
            as the final column of each output row.
        @param runfile: open file object receiving each chain's header dump.
        @param outfile: open file object receiving the combined samples.
        @param logPThreshold: samples before log(post) first exceeds this are dropped.
        @param fixedBurnins: per-file burnin cycle counts.
        @param nskips: per-file decimation factors (default: keep every sample).
        @param oldMassConvention: swap 1 <-> 2 labels in the header.
        """
        nRead=0
        outputHeader=False
        acceptedChains=0
        if nskips is None:
            nskips = np.ones(len(files),'int')
        for infilename,i,nskip,fixedBurnin in zip(files,range(1,len(files)+1),nskips,fixedBurnins):
            infile=open(infilename,'r')
            try:
                print("Writing header of %s to %s"%(infilename,runfile.name))
                runInfo,header=self._clear_infmcmc_header(infile)
                runfile.write('Chain '+str(i)+':\n')
                runfile.writelines(runInfo)
                print("Processing file %s to %s"%(infilename,outfile.name))
                write_fref = False
                if 'f_ref' not in header:
                    # Old-style output without an f_ref column: recover the
                    # reference frequency from the run header instead.
                    write_fref = True
                    f_ref=self._find_infmcmc_f_ref(runInfo)
                if oldMassConvention:
                    # Swap #1 for #2 because our old mass convention
                    # has m2 > m1, while the common convention has m1
                    # > m2
                    header=[self._swaplabel12(label) for label in header]
                if not outputHeader:
                    # The column header is written once, from the first file.
                    for label in header:
                        outfile.write(label)
                        outfile.write(" ")
                    if write_fref:
                        outfile.write("f_ref")
                        outfile.write(" ")
                    outfile.write("chain")
                    outfile.write("\n")
                    outputHeader=header
                iterindex=header.index("cycle")
                logpindex=header.index("logpost")
                output=False
                for line in infile:
                    line=line.lstrip()
                    lineParams=line.split()
                    iter=int(lineParams[iterindex])
                    logP=float(lineParams[logpindex])
                    # Once both burnin criteria are met, everything after is kept
                    # (subject to decimation by nskip).
                    if (iter > fixedBurnin) and (logP >= logPThreshold):
                        output=True
                    if output:
                        if nRead % nskip == 0:
                            for label in outputHeader:
                                # Note that the element "a1" in the
                                # *header* actually already
                                # corresponds to the "a2" *column* of
                                # the input because we switched the
                                # names above
                                outfile.write(lineParams[header.index(label)])
                                outfile.write("\t")
                            if write_fref:
                                outfile.write(f_ref)
                                outfile.write("\t")
                            outfile.write(str(i))
                            outfile.write("\n")
                        nRead=nRead+1
                if output: acceptedChains += 1
            finally:
                infile.close()
        print("%i of %i chains accepted."%(acceptedChains,len(files)))
5463
5464 def _swaplabel12(self, label):
5465 if label[-1] == '1':
5466 return label[0:-1] + '2'
5467 elif label[-1] == '2':
5468 return label[0:-1] + '1'
5469 else:
5470 return label[:]
5471
5472 def _find_max_logP(self, files):
5473 """
5474 Given a list of files, reads them, finding the maximum log(post)
5475 """
5476 maxLogP = -np.inf
5477 for inpname in files:
5478 infile=open(inpname, 'r')
5479 try:
5480 runInfo,header=self._clear_infmcmc_header(infile)
5481 logpindex=header.index("logpost")
5482 for line in infile:
5483 line=line.lstrip().split()
5484 logP=float(line[logpindex])
5485 if logP > maxLogP:
5486 maxLogP=logP
5487 finally:
5488 infile.close()
5489 print("Found max log(post) = ", maxLogP)
5490 return maxLogP
5491
    def _find_ndownsample(self, files, logPthreshold, fixedBurnins, nDownsample):
        """
        Given a list of files, threshold value, and a desired
        number of outputs posterior samples, return the skip number to
        achieve the desired number of posterior samples.

        Returns a float numpy array of per-file skip factors; entries are
        NaN when a chain has no uncorrelated samples (None assigned into a
        float array is stored as NaN by numpy).
        """
        nfiles = len(files)
        ntots=[]
        nEffectives = []
        if nDownsample is None: print("Max ACL(s):")
        for inpname,fixedBurnin in zip(files,fixedBurnins):
            infile = open(inpname, 'r')
            try:
                runInfo,header = self._clear_infmcmc_header(infile)
                header = [name.lower() for name in header]
                logpindex = header.index("logpost")
                iterindex = header.index("cycle")
                deltaLburnedIn = False
                fixedBurnedIn = False
                adapting = True
                lines=[]
                ntot=0
                for line in infile:
                    line = line.lstrip().split()
                    iter = int(line[iterindex])
                    logP = float(line[logpindex])
                    if iter > fixedBurnin:
                        fixedBurnedIn = True
                    # If adaptation reset, throw out what was collected so far
                    elif fixedBurnedIn:
                        fixedBurnedIn = False
                        ntot = 0
                        lines = []
                    if logP > logPthreshold:
                        deltaLburnedIn = True
                    if iter > 0:
                        adapting = False
                    if fixedBurnedIn and deltaLburnedIn and not adapting:
                        ntot += 1
                        lines.append(line)
                ntots.append(ntot)
                if nDownsample is None:
                    try:
                        # NOTE(review): these spline parameter names omit the
                        # underscore before the index ('h1_spcal_freq0'),
                        # unlike the module-level list which uses
                        # '..._spcal_freq_0' — confirm which form the headers
                        # actually use.
                        splineParams=["spcal_npts", "spcal_active","constantcal_active"]
                        for i in np.arange(5):
                            for k in ['h1','l1']:
                                splineParams.append(k+'_spcal_freq'+str(i))
                                splineParams.append(k+'_spcal_logfreq'+str(i))

                        # Columns that are bookkeeping rather than sampled
                        # parameters; excluded from the ACL computation.
                        nonParams = ["logpost", "cycle", "timestamp", "snrh1", "snrl1", "snrv1",
                                     "margtime","margtimephi","margtime","time_max","time_min",
                                     "time_mean", "time_maxl","sky_frame","psdscaleflag","logdeltaf","flow","f_ref",
                                     "lal_amporder","lal_pnorder","lal_approximant","tideo","spino","signalmodelflag",
                                     "temperature","nifo","nlocaltemps","ntemps","randomseed","samplerate","segmentlength","segmentstart",
                                     "t0", "phase_maxl", "azimuth", "cosalpha"] + logParams + snrParams + splineParams
                        nonParamsIdxs = [header.index(name) for name in nonParams if name in header]
                        samps = np.array(lines).astype(float)
                        # Columns that never change are fixed parameters;
                        # exclude them from the ACL computation too.
                        fixedIdxs = np.where(np.amin(samps,axis=0)-np.amax(samps,axis=0) == 0.0)[0]
                        nonParamsIdxs.extend(fixedIdxs)
                        paramIdxs = [i for i in range(len(header)) if i not in nonParamsIdxs]
                        stride=samps[1,iterindex] - samps[0,iterindex]
                        results = np.array([np.array(effectiveSampleSize(samps[:,i])[:2]) for i in paramIdxs])
                        nEffs = results[:,0]
                        nEffectives.append(min(nEffs))
                        ACLs = results[:,1]
                        maxACLind = np.argmax(ACLs)
                        maxACL = ACLs[maxACLind]
                        # Get index in header, which includes "non-params"
                        maxACLind = paramIdxs[maxACLind]
                        print("%i (%s) for chain %s." %(stride*maxACL,header[maxACLind],inpname))
                    # NOTE(review): bare except silently swallows *all*
                    # errors (including KeyboardInterrupt) — consider
                    # narrowing to Exception.
                    except:
                        nEffectives.append(None)
                        print("Error computing effective sample size of %s!"%inpname)

            finally:
                infile.close()
        nskips = np.ones(nfiles)
        ntot = sum(ntots)
        if nDownsample is not None:
            if ntot > nDownsample:
                nskips *= int(floor(ntot/nDownsample))
        else:
            for i in range(nfiles):
                nEff = nEffectives[i]
                ntot = ntots[i]
                if nEff > 1:
                    if ntot > nEff:
                        nskips[i] = int(ceil(ntot/nEff))
                else:
                    # numpy stores None as NaN in a float array; callers test
                    # this with np.isnan().
                    nskips[i] = None
        return nskips
5583
5584 def _find_infmcmc_f_ref(self, runInfo):
5585 """
5586 Searches through header to determine reference frequency of waveforms.
5587 If no fRef given, calls _find_infmcmc_f_lower to get the lower frequency
5588 bound, which is the default reference frequency for LALInference.
5589 """
5590 fRef = None
5591 runInfoIter = iter(runInfo)
5592 for line in runInfoIter:
5593 headers=line.lstrip().lower().split()
5594 try:
5595 fRefColNum = headers.index('f_ref') # strings get converted to all lower case
5596 info = runInfoIter.next().lstrip().lower().split()
5597 fRef = info[-1]#fRefColNum] # too many column names with spaces for this way to work. I just grab the last value. Hopefully we will update to xml output files and those messy headers will be gone.
5598 break
5599 except ValueError:
5600 continue
5601
5602 # ***TEMPORARY*** If not in table, check command line.
5603 # ...This is messy, but the only option for dealing with old headers
5604 if not fRef:
5605 runInfoIter = iter(runInfo)
5606 for line in runInfoIter:
5607 headers=line.lstrip().lower().split()
5608 try:
5609 if headers[0]=="command":
5610 try:
5611 fRefInd = headers.index('--fref')+1
5612 fRef = headers[fRefInd]
5613 except ValueError:
5614 pass
5615 break
5616 except IndexError:
5617 continue
5618
5619 # If no fRef is found, use lower frequency bound
5620 if not fRef:
5621 fRef = self._find_infmcmc_f_lower(runInfo)
5622
5623 return fRef
5624
5625 def _find_infmcmc_f_lower(self, runInfo):
5626 """
5627 Searches through header to determine starting frequency of waveforms.
5628 Assumes same for all IFOs.
5629 """
5630 runInfo = iter(runInfo)
5631 for line in runInfo:
5632 headers=line.lstrip().lower().split()
5633 try:
5634 flowColNum = headers.index('f_low')
5635 IFOinfo = runInfo.next().lstrip().lower().split()
5636 f_lower = IFOinfo[flowColNum]
5637 break
5638 except ValueError:
5639 continue
5640 return f_lower
5641
5642 def _clear_infmcmc_header(self, infile):
5643 """
5644 Reads lalinference_mcmcmpi file given, returning the run info and
5645 common output header information.
5646 """
5647 runInfo = []
5648 for line in infile:
5649 runInfo.append(line)
5650 headers=line.lstrip().lower().split()
5651 try:
5652 headers.index('cycle')
5653 break
5654 except ValueError:
5655 continue
5656 else:
5657 raise RuntimeError("couldn't find line with 'cycle' in LALInferenceMCMC input")
5658 return runInfo[:-1],headers
5659
5660
5661 def _ns_to_pos(self,files,Nlive=None,Npost=None,posfilename='posterior_samples.dat'):
5662 """
5663 Parser for nested sampling output.
5664 files : list of input NS files
5665 Nlive : Number of live points
5666 Npost : Desired number of posterior samples
5667 posfilename : Posterior output file name (default: 'posterior_samples.dat')
5668 """
5669 try:
5670 from lalinference.nest2pos import draw_N_posterior_many,draw_posterior_many
5671 except ImportError:
5672 print("Need lalinference.nest2pos to convert nested sampling output!")
5673 raise
5674
5675 if Nlive is None:
5676 raise RuntimeError("Need to specify number of live points in positional arguments of parse!")
5677
5678 #posfile.write('mchirp \t eta \t time \t phi0 \t dist \t RA \t dec \t
5679 #psi \t iota \t likelihood \n')
5680 # get parameter list
5681 it = iter(files)
5682
5683 # check if there's a file containing the parameter names
5684 parsfilename = (it.next()).strip('.gz')+'_params.txt'
5685
5686 if os.path.isfile(parsfilename):
5687 print('Looking for '+parsfilename)
5688
5689 if os.access(parsfilename,os.R_OK):
5690
5691 with open(parsfilename,'r') as parsfile:
5692 outpars=parsfile.readline()+'\n'
5693 else:
5694 raise RuntimeError('Cannot open parameters file %s!'%(parsfilename))
5695
5696 else: # Use hardcoded CBC parameter names
5697 outpars='mchirp \t eta \t time \t phi0 \t dist \t RA \t \
5698 dec \t psi \t iota \t logl \n'
5699
5700 # Find the logL column
5701 parsvec=outpars.split()
5702 logLcol=-1
5703 for i in range(len(parsvec)):
5704 if parsvec[i].lower()=='logl':
5705 logLcol=i
5706 if logLcol==-1:
5707 print('Error! Could not find logL column in parameter list: %s'%(outpars))
5708 raise RuntimeError
5709
5710 inarrays=list(map(np.loadtxt,files))
5711 if Npost is None:
5712 pos=draw_posterior_many(inarrays,[Nlive for f in files],logLcols=[logLcol for f in files])
5713 else:
5714 pos=draw_N_posterior_many(inarrays,[Nlive for f in files],Npost,logLcols=[logLcol for f in files])
5715
5716 with open(posfilename,'w') as posfile:
5717
5718 posfile.write(outpars)
5719
5720 for row in pos:
5721 for i in row:
5722 posfile.write('%10.12e\t' %(i))
5723 posfile.write('\n')
5724
5725 with open(posfilename,'r') as posfile:
5726 return_val=self._common_to_pos(posfile)
5727
5728 return return_val
5729
5730 def _followupmcmc_to_pos(self,files):
5731 """
5732 Parser for followupMCMC output.
5733 """
5734 return self._common_to_pos(open(files[0],'r'),delimiter=',')
5735
5736
5737 def _multinest_to_pos(self,files):
5738 """
5739 Parser for MultiNest output.
5740 """
5741 return self._common_to_pos(open(files[0],'r'))
5742
    def _xml_to_pos(self,infile):
        """
        Parser for VOTable XML input.

        Looks for a TABLE element with utype
        'lalinference:results:posteriorsamples'; failing that, converts a
        nested-samples TABLE (via vo_nest2pos) before parsing it.
        """
        from xml.etree import ElementTree as ET
        xmlns='http://www.ivoa.net/xml/VOTable/v1.1'
        try:
            register_namespace=ET.register_namespace
        except AttributeError:
            # Older ElementTree without register_namespace: fall back to the
            # private namespace map.
            def register_namespace(prefix,uri):
                ET._namespace_map[uri]=prefix
        register_namespace('vot',xmlns)
        tree = ET.ElementTree()

        tree.parse(infile)
        # Find the posterior table
        tables = tree.findall('.//{%s}TABLE'%(xmlns))
        for table in tables:
            if table.get('utype')=='lalinference:results:posteriorsamples':
                return(self._VOTTABLE2pos(table))
        # No posterior table: fall back to resampling a nested-samples table.
        for table in tables:
            if table.get('utype')=='lalinference:results:nestedsamples':
                nsresource=[node for node in tree.findall('{%s}RESOURCE'%(xmlns)) if node.get('utype')=='lalinference:results'][0]
                return(self._VOTTABLE2pos(vo_nest2pos(nsresource)))
        raise RuntimeError('Cannot find "Posterior Samples" TABLE element in XML input file %s'%(infile))
5768
    def _VOTTABLE2pos(self,table):
        """
        Parser for a VOT TABLE element with FIELDs and TABLEDATA elements.

        Returns (header, flines): the list of column names and a 2-D numpy
        float array of the table data. Columns named log*/sin*/cos* (whose
        base name is not already a column) are transformed back with
        exp/arcsin/arccos and renamed.
        """
        from xml.etree import ElementTree as ET
        xmlns='http://www.ivoa.net/xml/VOTable/v1.1'
        try:
            register_namespace=ET.register_namespace
        except AttributeError:
            # Older ElementTree without register_namespace.
            def register_namespace(prefix,uri):
                ET._namespace_map[uri]=prefix
        register_namespace('vot',xmlns)

        header=[]
        for field in table.findall('./{%s}FIELD'%(xmlns)):
            header.append(field.attrib['name'])
        if(len(header)==0):
            raise RuntimeError('Unable to find FIELD nodes for table headers in XML table')
        data=table.findall('./{%s}DATA'%(xmlns))
        tabledata=data[0].find('./{%s}TABLEDATA'%(xmlns))
        llines=[]
        for row in tabledata:
            # Each TR's TD children hold the numeric cell text.
            llines.append(np.array(list(map(lambda a:float(a.text),row))))
        flines=np.array(llines)
        for i in range(0,len(header)):
            # 'log' columns are exponentiated unless the parameter is meant
            # to stay logarithmic (logParams / Lorentz-violation params) or
            # the un-logged column already exists.
            if header[i].lower().find('log')!=-1 and header[i].lower() not in logParams and re.sub('log', '', header[i].lower()) not in [h.lower() for h in header] and header[i].lower() not in lorentzInvarianceViolationParams:
                print('exponentiating %s'%(header[i]))

                flines[:,i]=np.exp(flines[:,i])

                header[i]=re.sub('log', '', header[i], flags=re.IGNORECASE)
            if header[i].lower().find('sin')!=-1 and re.sub('sin', '', header[i].lower()) not in [h.lower() for h in header]:
                print('asining %s'%(header[i]))
                flines[:,i]=np.arcsin(flines[:,i])
                header[i]=re.sub('sin', '', header[i], flags=re.IGNORECASE)
            if header[i].lower().find('cos')!=-1 and re.sub('cos', '', header[i].lower()) not in [h.lower() for h in header]:
                print('acosing %s'%(header[i]))
                flines[:,i]=np.arccos(flines[:,i])
                header[i]=re.sub('cos', '', header[i], flags=re.IGNORECASE)
            header[i]=header[i].replace('(','')
            header[i]=header[i].replace(')','')
        print('Read columns %s'%(str(header)))
        return header,flines
5812
5813 def _hdf5s_to_pos(self, infiles, fixedBurnins=None, deltaLogP=None, nDownsample=None, tablename=None, **kwargs):
5814 from astropy.table import vstack
5815
5816 if fixedBurnins is None:
5817 fixedBurnins = np.zeros(len(infiles))
5818
5819 if len(infiles) > 1:
5820 multiple_chains = True
5821
5822 chains = []
5823 for i, [infile, fixedBurnin] in enumerate(zip(infiles, fixedBurnins)):
5824 chain = self._hdf5_to_table(infile, fixedBurnin=fixedBurnin, deltaLogP=deltaLogP, nDownsample=nDownsample, multiple_chains=multiple_chains, tablename=tablename, **kwargs)
5825 chain.add_column(astropy.table.Column(i*np.ones(len(chain)), name='chain'))
5826 chains.append(chain)
5827
5828 # Apply deltaLogP criteria across chains
5829 if deltaLogP is not None:
5830 logPThreshold = -np.inf
5831 for chain in chains:
5832 if len(chain) > 0:
5833 logPThreshold = max([logPThreshold, max(chain['logpost'])- deltaLogP])
5834 print("Eliminating any samples before log(L) = {}".format(logPThreshold))
5835
5836 for i, chain in enumerate(chains):
5837 if deltaLogP is not None:
5838 above_threshold = np.arange(len(chain))[chain['logpost'] > logPThreshold]
5839 burnin_idx = above_threshold[0] if len(above_threshold) > 0 else len(chain)
5840 else:
5841 burnin_idx = 0
5842 chains[i] = chain[burnin_idx:]
5843
5844 samples = vstack(chains)
5845
5846 # Downsample one more time
5847 if nDownsample is not None:
5848 nskip = find_ndownsample(samples, nDownsample)
5849 samples = samples[::nskip]
5850
5851 return samples.colnames, as_array(samples).view(float).reshape(-1, len(samples.columns))
5852
    def _hdf5_to_table(self, infile, deltaLogP=None, fixedBurnin=None, nDownsample=None, multiple_chains=False, tablename=None, **kwargs):
        """
        Parse a HDF5 file and return an astropy Table of posterior samples.
        Equivalent to '_common_to_pos' and work in progress.

        log*/sin*/cos* columns are inverted and renamed as in
        _common_to_pos; MCMC output (detected by a 'cycle' column) is then
        burned in (fixedBurnin, deltaLogP) and downsampled.
        """
        if not tablename:
            samples = read_samples(infile, tablename=posterior_grp_name)
        else:
            samples = read_samples(infile, tablename=tablename)
        params = samples.colnames

        for param in params:
            param_low = param.lower()
            # Undo the log-transform unless the parameter is meant to stay
            # logarithmic or the un-logged column already exists.
            if param_low.find('log') != -1 and param_low not in logParams and re.sub('log', '', param_low) not in [p.lower() for p in params] and param_low not in lorentzInvarianceViolationParams:
                print('exponentiating %s' % param)
                new_param = re.sub('log', '', param, flags=re.IGNORECASE)
                samples[new_param] = np.exp(samples[param])
                del samples[param]
                param = new_param
            if param_low.find('sin') != -1 and re.sub('sin', '', param_low) not in [p.lower() for p in params]:
                print('asining %s' % param)
                new_param = re.sub('sin', '', param, flags=re.IGNORECASE)
                samples[new_param] = np.arcsin(samples[param])
                del samples[param]
                param = new_param
            if param_low.find('cos') != -1 and re.sub('cos', '', param_low) not in [p.lower() for p in params]:
                print('acosing %s' % param)
                new_param = re.sub('cos', '', param, flags=re.IGNORECASE)
                samples[new_param] = np.arccos(samples[param])
                del samples[param]
                param = new_param

            if param != param.replace('(', ''):
                samples.rename_column(param, param.replace('(', ''))
            if param != param.replace(')', ''):
                samples.rename_column(param, param.replace(')', ''))

            # Make everything a float, since that's what's expected of a CommonResultsObj
            replace_column(samples, param, samples[param].astype(float))

        params = samples.colnames
        print('Read columns %s' % str(params))

        # MCMC burnin and downsampling
        if 'cycle' in params:
            if not (fixedBurnin is None):
                if not (deltaLogP is None):
                    print("Warning: using deltaLogP criteria in addition to fixed burnin")
                print("Fixed burning criteria: ",fixedBurnin)
            else:
                fixedBurnin = 0

            burned_in_cycles = np.arange(len(samples))[samples['cycle'] > fixedBurnin]
            burnin_idx = burned_in_cycles[0] if len(burned_in_cycles) > 0 else len(samples)
            samples = samples[burnin_idx:]

            logPThreshold=-np.inf
            if len(samples) > 0 and not (deltaLogP is None):
                logPThreshold = max(samples['logpost'])- deltaLogP
                print("Eliminating any samples before log(post) = ", logPThreshold)
                burnin_idx = np.arange(len(samples))[samples['logpost'] > logPThreshold][0]
                samples = samples[burnin_idx:]

            if len(samples) > 0:
                nskip = find_ndownsample(samples, nDownsample)
                if nDownsample is None:
                    print("Downsampling to take only uncorrelated posterior samples from each file.")
                    if np.isnan(nskip) and not multiple_chains:
                        print("WARNING: All samples in chain are correlated. Downsampling to 10000 samples for inspection!!!")
                        nskip = find_ndownsample(samples, 10000)
                        # NOTE(review): with this indentation, samples are only
                        # thinned when the chain was fully correlated; a finite
                        # nskip from the first call is never applied — confirm
                        # this is intended.
                        samples = samples[::nskip]
                else:
                    if np.isnan(nskip):
                        print("WARNING: All samples in {} are correlated.".format(infile))
                        samples = samples[-1:]
                    else:
                        print("Downsampling by a factor of ", nskip, " to achieve approximately ", nDownsample, " posterior samples")
                        samples = samples[::nskip]

        return samples
5933
5934 def _hdf5_to_pos(self, infile, fixedBurnins=None, deltaLogP=None, nDownsample=None, tablename=None, **kwargs):
5935 samples = self._hdf5_to_table(infile, fixedBurnin=fixedBurnins, deltaLogP=deltaLogP, nDownsample=nDownsample, tablename=tablename, **kwargs)
5936
5937 return samples.colnames, as_array(samples).view(float).reshape(-1, len(samples.columns))
5938
5939 def _common_to_pos(self,infile,info=[None,None]):
5940 """
5941 Parse a file in the 'common format' and return an array of posterior
5942 samples and list of parameter names. Will apply inverse functions to
5943 columns with names containing sin,cos,log.
5944 """
5945
5946 [headerfile,delimiter]=info
5947
5948 if headerfile==None:
5949 formatstr=infile.readline().lstrip()
5950 else:
5951 hf=open(headerfile,'r')
5952 formatstr=hf.readline().lstrip()
5953 hf.close()
5954
5955 formatstr=formatstr.replace('#','')
5956 formatstr=formatstr.replace('"','')
5957
5958 header=formatstr.split(delimiter)
5959 header[-1]=header[-1].rstrip('\n')
5960 nparams=len(header)
5961 llines=[]
5962 dec=re.compile(r'^[-+]?[0-9]*\.?[0-9]+([eE][-+]?[0-9]+)?$|^inf$')
5963
5964 for line_number,line in enumerate(infile):
5965 sline=line.split(delimiter)
5966 if sline[-1] == '\n':
5967 del(sline[-1])
5968 proceed=True
5969 if len(sline)<1:
5970 print('Ignoring empty line in input file: %s'%(sline))
5971 proceed=False
5972 elif len(sline)!=nparams:
5973 sys.stderr.write('WARNING: Malformed row %i, read %i elements but there is meant to be %i\n'%(line_number,len(sline),nparams))
5974 proceed=False
5975
5976 for elemn,st in enumerate(sline):
5977 s=st.replace('\n','')
5978 if dec.search(s) is None:
5979 print('Warning! Ignoring non-numeric data after the header: %s. Row = %i,Element=%i'%(s,line_number,elemn))
5980 proceed=False
5981 elif s == '\n':
5982 proceed=False
5983
5984 if proceed:
5985 llines.append(list(map(float,sline)))
5986
5987 flines=np.array(llines)
5988
5989 if not flines.any():
5990 raise RuntimeError("ERROR: no lines read in!")
5991
5992
5993 for i in range(0,len(header)):
5994 if header[i].lower().find('log')!=-1 and header[i].lower() not in logParams and re.sub('log', '', header[i].lower()) not in [h.lower() for h in header] and header[i].lower() not in lorentzInvarianceViolationParams:
5995 print('exponentiating %s'%(header[i]))
5996
5997 flines[:,i]=np.exp(flines[:,i])
5998
5999 header[i]=re.sub('log', '', header[i], flags=re.IGNORECASE)
6000 if header[i].lower().find('sin')!=-1 and re.sub('sin', '', header[i].lower()) not in [h.lower() for h in header]:
6001 print('asining %s'%(header[i]))
6002 flines[:,i]=np.arcsin(flines[:,i])
6003 header[i]=re.sub('sin', '', header[i], flags=re.IGNORECASE)
6004 if header[i].lower().find('cos')!=-1 and re.sub('cos', '', header[i].lower()) not in [h.lower() for h in header]:
6005 print('acosing %s'%(header[i]))
6006 flines[:,i]=np.arccos(flines[:,i])
6007 header[i]=re.sub('cos', '', header[i], flags=re.IGNORECASE)
6008 header[i]=header[i].replace('(','')
6009 header[i]=header[i].replace(')','')
6010 print('Read columns %s'%(str(header)))
6011 return header,flines
6012
def parse_converge_output_section(fo):
    """
    Parse the text of an R-style convergence-diagnostic output section.

    Restored: the 'def' line of this function was lost in this copy of the
    file, leaving its body stranded at module level.

    @param fo: the section text as a single string.
    @return: nested dict mapping section key ('[1] "..."' lines) -> chain
        column name -> list of token lists, one per data row.
    """
    result={}
    lines=fo.split('\n')
    chain_line=False
    out2=None    # current data-row accumulator; None until a chain header is seen
    for line in lines:

        if '[1]' in line:
            key=line.replace('[1]','').strip(' ').strip('"')
            result[key]={}
            out=result[key]
            continue
        # Bug fix: the original tested 'result is not {}', an identity
        # comparison against a fresh literal that is always True; test
        # truthiness instead so lines before the first key are skipped.
        if result:
            if 'chain' in line:
                chain_line=True
                continue
            if chain_line:
                chain_line=False
                key=line.strip('"').split()[1]
                out[key]=[]
                out2=out[key]
            elif out2 is not None:
                # Bug fix: 'newline is not []' was always True, so blank
                # lines were appended as empty lists; skip them. The guard
                # on out2 replaces a bare 'except: pass' that swallowed the
                # NameError raised for data lines before any chain header.
                newline=line.strip('"').split()
                if newline:
                    out2.append(newline)

    return result
6043#
6044
def vo_nest2pos(nsresource,Nlive=None):
    """
    Parse a VO Table RESOURCE containing nested sampling output and
    return a VOTable TABLE element with posterior samples in it.
    This can be added to an existing tree by the user.
    Nlive will be read from the nsresource, unless specified.

    Rows are kept with probability proportional to their posterior weight,
    using the module-level random generator (stochastic output).

    @raises RuntimeError: if Nlive cannot be determined, or if the nested
        samples table has no 'logL' column.
    """
    from xml.etree import ElementTree as ET
    import copy
    from math import log, exp
    xmlns='http://www.ivoa.net/xml/VOTable/v1.1'
    try:
        register_namespace=ET.register_namespace
    except AttributeError:
        def register_namespace(prefix,uri):
            ET._namespace_map[uri]=prefix
    register_namespace('vot',xmlns)

    postable=ET.Element("{%s}TABLE"%(xmlns),attrib={'name':'Posterior Samples','utype':'lalinference:results:posteriorsamples'})
    i=0
    nstables=[resource for resource in nsresource.findall("./{%s}TABLE"%(xmlns)) if resource.get("utype")=="lalinference:results:nestedsamples"]

    nstable=nstables[0]
    if Nlive is None:
        runstateResource = [resource for resource in nsresource.findall("./{%s}RESOURCE"%(xmlns)) if resource.get("utype")=="lalinference:state"][0]
        algTable = [table for table in runstateResource.findall("./{%s}TABLE"%(xmlns)) if table.get("utype")=="lalinference:state:algorithmparams"][0]
        Nlive = int ([param for param in algTable.findall("./{%s}PARAM"%(xmlns)) if param.get("name")=='Nlive'][0].get('value'))
        print('Found Nlive %i'%(Nlive))
    if Nlive is None:
        raise RuntimeError("Cannot find number of live points in XML table, please specify")
    logLcol = None
    for fieldnode in nstable.findall('./{%s}FIELD'%xmlns):
        if fieldnode.get('name') == 'logL':
            logLcol=i
        i=i+1
        postable.append(copy.deepcopy(fieldnode))
    for paramnode in nstable.findall('./{%s}PARAM'%(xmlns)):
        postable.append(copy.deepcopy(paramnode))
    if logLcol is None:
        # Bug fix: the RuntimeError was constructed but never raised, so
        # a missing logL column crashed later with a confusing TypeError.
        raise RuntimeError("Unable to find logL column")
    posdataNode=ET.Element("{%s}DATA"%(xmlns))
    postabledataNode=ET.Element("{%s}TABLEDATA"%(xmlns))
    postable.append(posdataNode)
    posdataNode.append(postabledataNode)
    nstabledata=nstable.find('./{%s}DATA/{%s}TABLEDATA'%(xmlns,xmlns))
    # Nested-sampling prior-volume shrinkage: each successive sample's
    # log-weight decreases by 1/Nlive.
    logw=log(1.0 - exp(-1.0/float(Nlive)))
    weights=[]
    for row in nstabledata:
        logL=float(row[logLcol].text)
        weights.append(logL-logw)
        logw=logw-1.0/float(Nlive)
    mw=max(weights)
    weights = [w - mw for w in weights]
    for (row,weight) in zip(nstabledata,weights):
        if weight > log(random.random()):
            postabledataNode.append(copy.deepcopy(row))
    return postable
6102
# VOTable 1.1 XML namespace shared by the VOTable-to-HTML helpers below.
xmlns='http://www.ivoa.net/xml/VOTable/v1.1'
6104
    def __init__(self):
        # Accumulate the rendered VOTable into an HTML section.
        # NOTE(review): htmlSection is defined elsewhere in this module;
        # a following initialisation line appears to be lost in this copy.
        self.html=htmlSection("VOTable information")
6109
6110 def start(self,tag,attrib):
6111 if tag=='{%s}TABLE'%(xmlns):
6112 if attrib['utype']=='lalinference:results:nestedsamples'\
6113 or attrib['utype']=='lalinference:results:posteriorsamples':
6114 self.skiptable=1
6115 else:
6116 self.skiptable=0
6118 self.tableouter.h2(attrib['name'])
6119 try:
6120 self.tableouter.p(attrib['utype'])
6121 except KeyError:
6122 pass
6123 self.fixedparams=htmlChunk('table',attrib={'class':'statstable'},parent=self.tableouter)
6124 self.table=htmlChunk('table',attrib={'class':'statstable'},parent=self.tableouter)
6125 self.tabheader=htmlChunk('tr',parent=self.table)
6126 if tag=='{%s}FIELD'%(xmlns):
6127 self.field=htmlChunk('th',{'name':attrib['name']},parent=self.tabheader)
6128 if tag=='{%s}TR'%(xmlns):
6129 self.tabrow=htmlChunk('tr',parent=self.table)
6130 if tag=='{%s}TD'%(xmlns):
6131 self.td=htmlChunk('td',parent=self.tabrow)
6132 if tag=='{%s}PARAM'%(xmlns):
6133 pnode=htmlChunk('tr',parent=self.fixedparams)
6134 namenode=htmlChunk('td',parent=pnode)
6135 namenode.p(attrib['name'])
6136 valnode=htmlChunk('td',parent=pnode)
6137 valnode.p(attrib['value'])
6138
6139 def end(self,tag):
6140 if tag=='{%s}TABLE'%(xmlns):
6141 if not self.skiptable:
6142 self.html.append(self.tableouter._html)
6143 if tag=='{%s}FIELD'%(xmlns):
6144 self.field.p(self.datadata)
6145 if tag=='{%s}TD'%(xmlns):
6146 self.td.p(self.datadata)
6147
6148 def data(self,data):
6149 self.datadata=data
6150
6151 def close(self):
6152 return self.html.toprettyxml()
6153
6154def _cl_width(cl_bound):
6155 """Returns (high - low), the width of the given confidence
6156 bounds."""
6157
6158 return cl_bound[1] - cl_bound[0]
6159
6160def _cl_count(cl_bound, samples):
6161 """Returns the number of samples within the given confidence
6162 bounds."""
6163
6164 return np.sum((samples >= cl_bound[0]) & (samples <= cl_bound[1]))
6165
def confidence_interval_uncertainty(cl, cl_bounds, posteriors):
    """Returns a tuple (relative_change, fractional_uncertainty,
    percentile_uncertainty) giving the uncertainty in confidence
    intervals from multiple posteriors.

    The uncertainty in the confidence intervals is the difference in
    length between the widest interval, formed from the smallest to
    largest values among all the cl_bounds, and the narrowest
    interval, formed from the largest-small and smallest-large values
    among all the cl_bounds. Note that neither the smallest nor the
    largest confidence intervals necessarily correspond to one of the
    cl_bounds.

    The relative change relates the confidence interval uncertainty to
    the expected value of the parameter, the fractional uncertainty
    relates this length to the length of the confidence level from the
    combined posteriors, and the percentile uncertainty gives the
    change in percentile over the combined posterior between the
    smallest and largest confidence intervals.

    @param cl The confidence level (between 0 and 1).

    @param cl_bounds A list of (low, high) pairs giving the confidence
    interval associated with each posterior.

    @param posteriors A list of PosteriorOneDPDF objects giving the
    posteriors."""

    Ns=[p.samples.shape[0] for p in posteriors]
    Nsamplers=len(Ns)

    # Weight each sample within a run equally, and each run equally
    # with respect to the others
    all_samples = np.squeeze(np.concatenate([p.samples for p in posteriors], axis=0))
    weights = np.squeeze(np.concatenate([p.samples*0.0+1.0/(Nsamplers*N) for (N,p) in zip(Ns,posteriors)], axis=0))

    # Sort samples and carry their weights along for the weighted quantiles.
    isort=np.argsort(all_samples)

    all_samples = all_samples[isort]
    weights = weights[isort]

    param_mean = np.average(all_samples, weights=weights)

    N=all_samples.shape[0]

    # Two-sided interval: alpha probability in each tail.
    alpha = (1.0 - cl)/2.0

    # Weighted empirical CDF; pick the first samples crossing each tail.
    wttotal = np.cumsum(weights)
    ilow = np.nonzero(wttotal >= alpha)[0][0]
    ihigh = np.nonzero(wttotal >= 1.0-alpha)[0][0]

    all_cl_bound = (all_samples[ilow], all_samples[ihigh])

    low_bounds = np.array([l for (l,h) in cl_bounds])
    high_bounds = np.array([h for (l,h) in cl_bounds])

    largest_cl_bound = (np.min(low_bounds), np.max(high_bounds))
    smallest_cl_bound = (np.max(low_bounds), np.min(high_bounds))

    if smallest_cl_bound[1] < smallest_cl_bound[0]:
        # Then the smallest CL is NULL
        smallest_cl_bound = (0.0, 0.0)

    ci_uncertainty = _cl_width(largest_cl_bound) - _cl_width(smallest_cl_bound)

    relative_change = ci_uncertainty/param_mean

    frac_uncertainty = ci_uncertainty/_cl_width(all_cl_bound)

    quant_uncertainty = float(_cl_count(largest_cl_bound, all_samples) - _cl_count(smallest_cl_bound, all_samples))/float(N)

    return (relative_change, frac_uncertainty, quant_uncertainty)
6238
6239
def plot_waveform(pos=None,siminspiral=None,event=0,path=None,ifos=['H1','L1','V1']):
    """Plot the injected and/or maximum-posterior recovered waveform
    projected onto each detector, in the time and frequency domains.

    @param pos Posterior object; its maxP sample generates the recovered
    waveform (skipped if None).
    @param siminspiral Path to a sim_inspiral XML table holding the
    injection parameters (skipped if None).
    @param event Row index of the injection in the sim_inspiral table.
    @param path Output directory for WF_DetFrame.png (defaults to cwd).
    @param ifos List of detector prefixes to project the waveform onto.
    The default list is only read, never mutated, so the shared mutable
    default is safe here.

    Returns (inj_strains, rec_strains) dictionaries keyed by ifo, each
    holding time- and frequency-domain strain and abscissa arrays, or
    None if no waveform could be generated.
    """
    #import sim inspiral table content handler
    from igwn_ligolw import lsctables,ligolw
    from lalsimulation import SimInspiralChooseTDWaveform,SimInspiralChooseFDWaveform
    from lalsimulation import SimInspiralImplementedTDApproximants,SimInspiralImplementedFDApproximants
    from lal import CreateREAL8TimeSeries,CreateForwardREAL8FFTPlan,CreateTukeyREAL8Window,CreateCOMPLEX16FrequencySeries,DimensionlessUnit,REAL8TimeFreqFFT
    from lal import ComputeDetAMResponse, GreenwichMeanSiderealTime
    from lal import LIGOTimeGPS
    from lal import MSUN_SI as LAL_MSUN_SI
    from lal import PC_SI as LAL_PC_SI
    import lalsimulation as lalsim
    from math import cos,sin,sqrt
    from igwn_ligolw import utils
    import os
    import numpy as np
    from numpy import arange
    if path is None:
        path=os.getcwd()
    if event is None:
        event=0
    colors_inj={'H1':'r','L1':'g','V1':'m','I1':'b','J1':'y'}
    colors_rec={'H1':'k','L1':'k','V1':'k','I1':'k','J1':'k'}
    # time and freq data handling variables
    srate=4096.0
    seglen=60.
    length=srate*seglen # length of 60 secs, hardcoded. May call a LALSimRoutine to get an idea
    deltaT=1/srate
    deltaF = 1.0 / (length* deltaT);

    # build window for FFT
    pad=0.4
    timeToFreqFFTPlan = CreateForwardREAL8FFTPlan(int(length), 1 );
    window=CreateTukeyREAL8Window(int(length),2.0*pad*srate/length);
    WinNorm = sqrt(window.sumofsquares/window.data.length);
    # time and freq domain strain:
    segStart=100000000
    strainT=CreateREAL8TimeSeries("strainT",segStart,0.0,1.0/srate,DimensionlessUnit,int(length));
    strainF= CreateCOMPLEX16FrequencySeries("strainF",segStart, 0.0, deltaF, DimensionlessUnit,int(length/2. +1));

    f_min=25 # hardcoded default (may be changed below)
    f_ref=100 # hardcoded default (may be changed below)
    f_max=srate/2.0
    plot_fmax=f_max

    inj_strains=dict((i,{"T":{'x':None,'strain':None},"F":{'x':None,'strain':None}}) for i in ifos)
    rec_strains=dict((i,{"T":{'x':None,'strain':None},"F":{'x':None,'strain':None}}) for i in ifos)

    inj_domain=None
    rec_domain=None
    font_size=26
    if siminspiral is not None:
        skip=0
        try:
            xmldoc = utils.load_filename(siminspiral)
            tbl = lsctables.SimInspiralTable.get_table(xmldoc)
            if event>0:
                tbl=tbl[event]
            else:
                tbl=tbl[0]
        except:
            e = sys.exc_info()[0]
            print(e)
            print("Cannot read event %s from table %s. Won't plot injected waveform \n"%(event,siminspiral))
            skip=1
        if not skip:
            REAL8time=tbl.geocent_end_time+1e-9*tbl.geocent_end_time_ns
            GPStime=LIGOTimeGPS(REAL8time)
            M1=tbl.mass1
            M2=tbl.mass2
            D=tbl.distance
            m1=M1*LAL_MSUN_SI
            m2=M2*LAL_MSUN_SI
            phiRef=tbl.coa_phase

            f_min = tbl.f_lower
            s1x = tbl.spin1x
            s1y = tbl.spin1y
            s1z = tbl.spin1z
            s2x = tbl.spin2x
            s2y = tbl.spin2y
            s2z = tbl.spin2z

            r=D*LAL_PC_SI*1.0e6
            iota=tbl.inclination
            print("WARNING: Defaulting to inj_fref =100Hz to plot the injected WF. This is hardcoded since xml table does not carry this information\n")

            wf=str(tbl.waveform)

            injapproximant=lalsim.GetApproximantFromString(wf)
            amplitudeO=int(tbl.amp_order )
            phaseO=lalsim.GetOrderFromString(wf)

            waveFlags=lal.CreateDict()
            lalsim.SimInspiralWaveformParamsInsertPNAmplitudeOrder(waveFlags, amplitudeO)
            lalsim.SimInspiralWaveformParamsInsertPNPhaseOrder(waveFlags, phaseO)

            ra=tbl.longitude
            dec=tbl.latitude
            psi=tbl.polarization

            if SimInspiralImplementedFDApproximants(injapproximant):
                inj_domain='F'
                [plus,cross]=SimInspiralChooseFDWaveform(m1, m2, s1x, s1y, s1z,s2x,s2y,s2z,r, iota, phiRef,
                        0, 0, 0, # Non-circular binary parameters
                        deltaF, f_min, f_max, f_ref,
                        waveFlags, injapproximant)
            elif SimInspiralImplementedTDApproximants(injapproximant):
                inj_domain='T'
                [plus,cross]=SimInspiralChooseTDWaveform(m1, m2, s1x, s1y, s1z,s2x,s2y,s2z, r, iota, phiRef,
                        0, 0, 0, # Non-circular binary parameters
                        deltaT, f_min, f_ref,
                        waveFlags, injapproximant)
            else:
                print("\nThe approximant %s doesn't seem to be recognized by lalsimulation!\n Skipping WF plots\n"%injapproximant)
                return None

            for ifo in ifos:
                fp, fc = ComputeDetAMResponse(lal.cached_detector_by_prefix[ifo].response, ra, dec, psi, GreenwichMeanSiderealTime(REAL8time))
                if inj_domain=='T':
                    # strain is a temporary container for this IFO strain.
                    # Take antenna pattern into account and window the data
                    for k in np.arange(strainT.data.length):
                        if k<plus.data.length:
                            strainT.data.data[k]=((fp*plus.data.data[k]+fc*cross.data.data[k]))
                        else:
                            strainT.data.data[k]=0.0
                        strainT.data.data[k]*=window.data.data[k]
                    # now copy in the dictionary only the part of strain which is not null (that is achieved using plus.data.length as length)
                    inj_strains[ifo]["T"]['strain']=np.array([strainT.data.data[k] for k in arange(plus.data.length)])
                    inj_strains[ifo]["T"]['x']=np.array([REAL8time - deltaT*(plus.data.length-1-k) for k in np.arange(plus.data.length)])

                    # Take the FFT
                    for j in arange(strainF.data.length):
                        strainF.data.data[j]=0.0
                    REAL8TimeFreqFFT(strainF,strainT,timeToFreqFFTPlan);
                    # undo the power loss of the Tukey window
                    for j in arange(strainF.data.length):
                        strainF.data.data[j]/=WinNorm
                    # copy in the dictionary
                    inj_strains[ifo]["F"]['strain']=np.array([strainF.data.data[k] for k in arange(int(strainF.data.length))])
                    inj_strains[ifo]["F"]['x']=np.array([strainF.f0+ k*strainF.deltaF for k in arange(int(strainF.data.length))])
                elif inj_domain=='F':
                    for k in np.arange(strainF.data.length):
                        if k<plus.data.length:
                            strainF.data.data[k]=((fp*plus.data.data[k]+fc*cross.data.data[k]))
                        else:
                            strainF.data.data[k]=0.0
                    # copy in the dictionary
                    inj_strains[ifo]["F"]['strain']=np.array([strainF.data.data[k] for k in arange(int(strainF.data.length))])
                    inj_strains[ifo]["F"]['x']=np.array([strainF.f0+ k*strainF.deltaF for k in arange(int(strainF.data.length))])
    if pos is not None:

        # Select the maxP sample
        _,which=pos._posMap()

        if 'time' in pos.names:
            REAL8time=pos['time'].samples[which][0]
        elif 'time_maxl' in pos.names:
            REAL8time=pos['time_maxl'].samples[which][0]
        elif 'time_min' in pos.names and 'time_max' in pos.names:
            REAL8time=pos['time_min'].samples[which][0]+0.5*(pos['time_max'].samples[which][0]-pos['time_min'].samples[which][0])
        else:
            print("ERROR: could not find any time parameter in the posterior file. Not plotting the WF...\n")
            return None

        # first check we have approx in posterior samples, otherwise skip
        skip=0
        try:
            approximant=int(pos['LAL_APPROXIMANT'].samples[which][0])
            amplitudeO=int(pos['LAL_AMPORDER'].samples[which][0])
            phaseO=int(pos['LAL_PNORDER'].samples[which][0])
        except:
            skip=1
        if skip==0:
            GPStime=LIGOTimeGPS(REAL8time)

            q=pos['q'].samples[which][0]
            mc=pos['mc'].samples[which][0]
            M1,M2=q2ms(mc,q)
            if 'dist' in pos.names:
                D=pos['dist'].samples[which][0]
            elif 'distance' in pos.names:
                D=pos['distance'].samples[which][0]
            elif 'logdistance' in pos.names:
                # BUG FIX: this branch previously read pos['distance'],
                # which cannot exist here (the elif above would have
                # matched); use the logdistance samples it tested for.
                D=np.exp(pos['logdistance'].samples[which][0])

            m1=M1*LAL_MSUN_SI
            m2=M2*LAL_MSUN_SI
            if 'phi_orb' in pos.names:
                phiRef=pos['phi_orb'].samples[which][0]
            elif 'phase' in pos.names:
                phiRef=pos['phase'].samples[which][0]
            elif 'phase_maxl' in pos.names:
                phiRef=pos['phase_maxl'].samples[which][0]
                print('INFO: phi_orb not estimated, using maximum likelihood value')
            else:
                print('WARNING: phi_orb not found in posterior files. Defaulting to 0.0 which is probably *not* what you want\n')
                phiRef=0.0

            try:
                for name in ['flow','f_lower']:
                    if name in pos.names:
                        f_min=pos[name].samples[which][0]
            except:
                pass

            # ROBUSTNESS FIX: when no f_ref-like column exists the
            # original code hit an uncaught NameError on `fname`; keep
            # the hardcoded 100 Hz default in that case instead.
            fname=None
            for name in ['fref','f_ref','f_Ref','fRef']:
                if name in pos.names:
                    fname=name
            try:
                if fname is not None:
                    Fref = np.unique(pos[fname].samples)
                    if len(Fref) > 1:
                        print("ERROR: Expected f_ref to be constant for all samples. Can't tell which value was injected! Defaulting to 100 Hz\n")
                        print(Fref)
                    else:
                        f_ref = Fref[0]
            except ValueError:
                print("WARNING: Could not read fref from posterior file! Defaulting to 100 Hz\n")

            # Spins: try spherical spin parameters first, then the
            # precessing system frame, then aligned-spin magnitudes.
            try:
                a = pos['a1'].samples[which][0]
                the = pos['theta_spin1'].samples[which][0]
                phi = pos['phi_spin1'].samples[which][0]
                s1x = (a * sin(the) * cos(phi));
                s1y = (a * sin(the) * sin(phi));
                s1z = (a * cos(the));
                a = pos['a2'].samples[which][0]
                the = pos['theta_spin2'].samples[which][0]
                phi = pos['phi_spin2'].samples[which][0]
                s2x = (a * sin(the) * cos(phi));
                s2y = (a * sin(the) * sin(phi));
                s2z = (a * cos(the));
                iota=pos['inclination'].samples[which][0]
            except:
                try:
                    iota, s1x, s1y, s1z, s2x, s2y, s2z=lalsim.SimInspiralTransformPrecessingNewInitialConditions(pos['theta_jn'].samples[which][0], pos['phi_JL'].samples[which][0], pos['tilt1'].samples[which][0], pos['tilt2'].samples[which][0], pos['phi12'].samples[which][0], pos['a1'].samples[which][0], pos['a2'].samples[which][0], m1, m2, f_ref, phiRef)
                except:
                    if 'a1z' in pos.names:
                        s1z=pos['a1z'].samples[which][0]
                    elif 'a1' in pos.names:
                        s1z=pos['a1'].samples[which][0]
                    else:
                        s1z=0
                    if 'a2z' in pos.names:
                        s2z=pos['a2z'].samples[which][0]
                    elif 'a2' in pos.names:
                        s2z=pos['a2'].samples[which][0]
                    else:
                        s2z=0
                    s1x=s1y=s2x=s2y=0.0
                    if 'inclination' in pos.names:
                        iota=pos['inclination'].samples[which][0]
                    else:
                        iota=pos['theta_jn'].samples[which][0]

            r=D*LAL_PC_SI*1.0e6

            # (approximant, amplitudeO and phaseO were already extracted
            # above; the redundant recomputation was removed.)

            waveFlags=lal.CreateDict()
            lalsim.SimInspiralWaveformParamsInsertPNAmplitudeOrder(waveFlags, amplitudeO)
            lalsim.SimInspiralWaveformParamsInsertPNPhaseOrder(waveFlags, phaseO)
            if 'tideO' in pos.names:
                tidalO=int(pos['tideO'].samples[which][0])
                lalsim.SimInspiralWaveformParamsInsertPNTidalOrder(waveFlags, tidalO)
            if 'spinO' in pos.names:
                spinO=int(pos['spinO'].samples[which][0])
                lalsim.SimInspiralWaveformParamsInsertPNSpinOrder(waveFlags, spinO)
            if 'lambda1' in pos.names:
                lalsim.SimInspiralWaveformParamsInsertTidalLambda1(waveFlags, pos['lambda1'].samples[which][0])
            if 'lambda2' in pos.names:
                lalsim.SimInspiralWaveformParamsInsertTidalLambda2(waveFlags, pos['lambda2'].samples[which][0])

            if SimInspiralImplementedFDApproximants(approximant):
                rec_domain='F'
                [plus,cross]=SimInspiralChooseFDWaveform(m1, m2, s1x, s1y, s1z,s2x,s2y,s2z,r, iota, phiRef,
                        0, 0, 0, # Non-circular binary parameters
                        deltaF, f_min, f_max, f_ref,
                        waveFlags, approximant)
            elif SimInspiralImplementedTDApproximants(approximant):
                rec_domain='T'
                [plus,cross]=SimInspiralChooseTDWaveform(m1, m2, s1x, s1y, s1z,s2x,s2y,s2z, r, iota, phiRef,
                        0, 0, 0, # Non-circular binary parameters
                        deltaT, f_min, f_ref,
                        waveFlags, approximant)
            else:
                print("The approximant %s doesn't seem to be recognized by lalsimulation!\n Skipping WF plots\n"%approximant)
                return None

            ra=pos['ra'].samples[which][0]
            dec=pos['dec'].samples[which][0]
            psi=pos['psi'].samples[which][0]
            for ifo in ifos:
                fp, fc = ComputeDetAMResponse(lal.cached_detector_by_prefix[ifo].response, ra, dec, psi, GreenwichMeanSiderealTime(REAL8time))
                if rec_domain=='T':
                    # strain is a temporary container for this IFO strain.
                    # Take antenna pattern into account and window the data
                    for k in np.arange(strainT.data.length):
                        if k<plus.data.length:
                            strainT.data.data[k]=((fp*plus.data.data[k]+fc*cross.data.data[k]))
                        else:
                            strainT.data.data[k]=0.0
                        strainT.data.data[k]*=window.data.data[k]
                    # now copy in the dictionary only the part of strain which is not null (that is achieved using plus.data.length as length)
                    rec_strains[ifo]["T"]['strain']=np.array([strainT.data.data[k] for k in arange(plus.data.length)])
                    rec_strains[ifo]["T"]['x']=np.array([REAL8time - deltaT*(plus.data.length-1-k) for k in np.arange(plus.data.length)])

                    # Take the FFT
                    for j in arange(strainF.data.length):
                        strainF.data.data[j]=0.0
                    REAL8TimeFreqFFT(strainF,strainT,timeToFreqFFTPlan);
                    # undo the power loss of the Tukey window
                    for j in arange(strainF.data.length):
                        strainF.data.data[j]/=WinNorm
                    # copy in the dictionary
                    rec_strains[ifo]["F"]['strain']=np.array([strainF.data.data[k] for k in arange(int(strainF.data.length))])
                    rec_strains[ifo]["F"]['x']=np.array([strainF.f0+ k*strainF.deltaF for k in arange(int(strainF.data.length))])
                elif rec_domain=='F':
                    for k in np.arange(strainF.data.length):
                        if k<plus.data.length:
                            strainF.data.data[k]=((fp*plus.data.data[k]+fc*cross.data.data[k]))
                        else:
                            strainF.data.data[k]=0.0
                    # copy in the dictionary
                    rec_strains[ifo]["F"]['strain']=np.array([strainF.data.data[k] for k in arange(int(strainF.data.length))])
                    rec_strains[ifo]["F"]['x']=np.array([strainF.f0+ k*strainF.deltaF for k in arange(int(strainF.data.length))])

    rows=len(ifos)
    cols=2

    # this variable decides which domain will be plotted on the left column of the plot.
    # only plot Time domain if both injections and recovery are TD
    global_domain="F"
    if rec_domain is not None and inj_domain is not None:
        if rec_domain=="T" and inj_domain=="T":
            global_domain="T"
    elif rec_domain is not None:
        if rec_domain=="T":
            global_domain="T"
    elif inj_domain is not None:
        if inj_domain=="T":
            global_domain="T"

    A,axes=plt.subplots(nrows=rows,ncols=cols,sharex=False,sharey=False)
    plt.setp(A,figwidth=23,figheight=15)
    for (r,i) in zip(np.arange(rows),ifos):
        for c in np.arange(cols):
            ax=axes[r]
            if type(ax)==np.ndarray:
                ax=ax[c]
            else:
                ax=axes[c]
            if rec_strains[i]["T"]['strain'] is not None or rec_strains[i]["F"]['strain'] is not None:
                if c==0:
                    if global_domain=="T":
                        ax.plot(rec_strains[i]["T"]['x'],rec_strains[i]["T"]['strain'],colors_rec[i],alpha=0.5,label='%s maP'%i)
                    else:
                        data=rec_strains[i]["F"]['strain']
                        f=rec_strains[i]["F"]['x']
                        mask=np.logical_and(f>=f_min,f<=plot_fmax)
                        ys=data
                        ax.semilogx(f[mask],ys[mask].real,'.-',color=colors_rec[i],alpha=0.5,label='%s maP'%i)
                else:
                    data=rec_strains[i]["F"]['strain']
                    f=rec_strains[i]["F"]['x']
                    mask=np.logical_and(f>=f_min,f<=plot_fmax)
                    ys=data
                    ax.loglog(f[mask],abs(ys[mask]),'--',color=colors_rec[i],alpha=0.5,linewidth=4)
                    ax.set_xlim([min(f[mask]),max(f[mask])])
                    ax.grid(True,which='both')
            if inj_strains[i]["T"]['strain'] is not None or inj_strains[i]["F"]['strain'] is not None:
                if c==0:
                    if global_domain=="T":
                        ax.plot(inj_strains[i]["T"]['x'],inj_strains[i]["T"]['strain'],colors_inj[i],alpha=0.5,label='%s inj'%i)
                    else:
                        data=inj_strains[i]["F"]['strain']
                        f=inj_strains[i]["F"]['x']
                        mask=np.logical_and(f>=f_min,f<=plot_fmax)
                        ys=data
                        ax.plot(f[mask],ys[mask].real,'.-',color=colors_inj[i],alpha=0.5,label='%s inj'%i)
                else:
                    data=inj_strains[i]["F"]['strain']
                    f=inj_strains[i]["F"]['x']
                    mask=np.logical_and(f>=f_min,f<=plot_fmax)
                    ys=data
                    ax.loglog(f[mask],abs(ys[mask]),'--',color=colors_inj[i],alpha=0.5,linewidth=4)
                    ax.set_xlim([min(f[mask]),max(f[mask])])
                    ax.grid(True,which='both')

            if r==0:
                if c==0:
                    if global_domain=="T":
                        ax.set_title(r"$h(t)$",fontsize=font_size)
                    else:
                        ax.set_title(r"$\Re[h(f)]$",fontsize=font_size)
                else:
                    ax.set_title(r"$|h(f)|$",fontsize=font_size)
            elif r==rows-1:
                if c==0:
                    if global_domain=="T":
                        ax.set_xlabel("time [s]",fontsize=font_size)
                    else:
                        ax.set_xlabel("frequency [Hz]",fontsize=font_size)
                else:
                    ax.set_xlabel("frequency [Hz]",fontsize=font_size)

            ax.legend(loc='best')
            ax.grid(True)

    A.savefig(os.path.join(path,'WF_DetFrame.png'),bbox_inches='tight')
    return inj_strains,rec_strains
6658
6659
def plot_psd(psd_files,outpath=None,f_min=30.):
    """Plot the noise PSD curves found in ``psd_files`` on a single
    log-log figure saved as PSD.png in ``outpath`` (cwd by default).

    Each file is expected to be a two-column (frequency, PSD) text file
    named ``*XX-PSD.dat`` where XX is the detector prefix.  Points below
    ``f_min`` or with zero/non-finite PSD are not drawn.  Returns a dict
    mapping lower-case detector prefix to its frequency array, or None
    if none of the files exist.
    """
    myfig2=plt.figure(figsize=(15,15),dpi=500)
    ax=plt.subplot(1,1,1)
    colors={'H1':'r','L1':'g','V1':'m','I1':'k','J1':'y'}

    if outpath is None:
        outpath=os.getcwd()

    # Keep only the files that actually exist, warning about the rest.
    existing=[]
    for f in psd_files:
        if os.path.isfile(f):
            existing.append(f)
        else:
            print("PSD file %s has not been found and won't be plotted\n"%f)
    if not existing:
        return None
    psd_files=existing

    freqs = {}
    for fname in psd_files:
        table=np.loadtxt(fname)
        freq=table[:,0]
        psd_vals=table[:,1]
        # The two characters before '-PSD.dat' are the detector prefix.
        idx=fname.find('-PSD.dat')
        ifo=fname[idx-2:idx]
        freqs[ifo.lower()] = freq
        keep=[(x,y) for (x,y) in zip(freq,psd_vals)
              if x>f_min and y!=0.0 and np.isfinite(y)]
        fr=[x for (x,y) in keep]
        da=[y for (x,y) in keep]
        plt.loglog(fr,da,colors[ifo],label=ifo,alpha=0.5,linewidth=3)
        plt.xlim([min(fr),max(fr)])
    plt.xlabel("Frequency [Hz]",fontsize=26)
    plt.ylabel("PSD",fontsize=26)
    plt.legend(loc='best')
    plt.grid(which='both')
    try:
        plt.tight_layout()
        myfig2.savefig(os.path.join(outpath,'PSD.png'),bbox_inches='tight')
    except:
        myfig2.savefig(os.path.join(outpath,'PSD.png'))
    myfig2.clf()

    return freqs
6706
def cred_level(cl, x):
    """Return the sample of ``x`` sitting at the ``cl`` quantile.

    ``x`` is sorted along axis 0 and indexed at ``int(cl*len(x))``.
    (Converted from a lambda assignment to a ``def`` per PEP 8; the
    name and call signature are unchanged.)
    """
    return np.sort(x, axis=0)[int(cl*len(x))]

def cred_interval(x, cl=.9, lower=True):
    """Return location of lower or upper confidence levels
    Args:
        x: List of samples.
        cl: Confidence level to return the bound of.
        lower: If ``True``, return the lower bound, otherwise return the upper bound.
    """
    if lower:
        return cred_level((1.-cl)/2, x)
    else:
        return cred_level((1.+cl)/2, x)
6720
def spline_angle_xform(delta_psi):
    """Map the spline calibration phase parameters ``delta_psi`` onto
    the corresponding rotation angle, in degrees.

    The parameters enter through the Moebius-like factor
    (2 + i*delta_psi)/(2 - i*delta_psi); the returned value is the
    argument of that complex number in degrees.
    """
    numerator = 2.0 + 1.0j*delta_psi
    denominator = 2.0 - 1.0j*delta_psi
    rot = numerator/denominator

    angle_rad = np.arctan2(np.imag(rot), np.real(rot))
    return 180.0/np.pi*angle_rad
6729
def plot_spline_pos(logf, ys, nf=100, level=0.9, color='k', label=None, xform=None):
    """Plot calibration posterior estimates for a spline model in log space.

    Draws the posterior mean with credible-level error bars at the spline
    control points, then the mean and shaded credible band of the spline
    interpolant evaluated on a grid of ``nf`` frequencies.

    Args:
        logf: The (log) location of spline control points.
        ys: List of posterior draws of function at control points ``logf``
        nf: Number of points to evaluate spline at for plotting.
        level: Credible level to fill in.
        color: Color to plot with.
        label: Label for plot.
        xform: Function to transform the spline into plotted values.
    """
    f = np.exp(logf)
    fs = np.linspace(f.min(), f.max(), nf)

    # One row per posterior draw, evaluated on the fine frequency grid.
    data = np.zeros((ys.shape[0], nf))

    if xform is None:
        zs = ys
    else:
        zs = xform(ys)

    # Error bars at the control points: distance from the mean to the
    # lower/upper bounds of the symmetric credible interval.
    mu = np.mean(zs, axis=0)
    lower_cl = mu - cred_interval(zs, level, lower=True)
    upper_cl = cred_interval(zs, level, lower=False) - mu
    plt.errorbar(np.exp(logf), mu, yerr=[lower_cl, upper_cl], fmt='.', color=color, lw=4, alpha=0.5, capsize=0)

    # Interpolate each posterior draw onto the fine grid.
    # NOTE(review): `interpolate` must be scipy.interpolate imported at
    # module level (import not visible in this chunk) — confirm.
    for i, samp in enumerate(ys):
        try:
            temp = interpolate.spline(logf, samp, np.log(fs))
        except AttributeError: # scipy < 0.19.0, or scipy.interpolate.spline removed
            calSpline = interpolate.InterpolatedUnivariateSpline(logf, samp, k=3, ext=2) #cubic spline (k=3), raises ValueError in extrapolation
            temp = calSpline(np.log(fs))
        if xform is None:
            data[i] = temp
        else:
            data[i] = xform(temp)

    # Mean curve plus shaded credible band of the interpolated draws.
    line, = plt.plot(fs, np.mean(data, axis=0), color=color, label=label)
    color = line.get_color()
    plt.fill_between(fs, cred_interval(data, level), cred_interval(data, level, lower=False), color=color, alpha=.1, linewidth=0.1)
    plt.xlim(f.min()-.5, f.max()+50)
6771
def plot_calibration_pos(pos, level=.9, outpath=None):
    """Plot posterior spline calibration envelopes for each detector.

    For every IFO that has ``spcal`` parameters in ``pos``, draws the
    amplitude (in percent) and phase (in degrees) calibration posteriors
    as a function of frequency, and saves the figure as calibration.png
    in ``outpath`` (cwd by default).
    """
    fig, (ax_amp, ax_phase) = plt.subplots(2, 1, figsize=(15, 15), dpi=500)

    font_size = 32
    if outpath is None:
        outpath = os.getcwd()

    params = pos.names
    ifos = np.unique([p.split('_')[0] for p in params if 'spcal_freq' in p])
    ifo_colors = {'h1': 'r', 'l1': 'g', 'v1': 'm'}
    for ifo in ifos:
        color = ifo_colors.get(ifo, 'c')

        # Spline control-point frequencies are assumed constant across samples
        freq_params = np.sort([p for p in params if
                               '{0}_spcal_freq'.format(ifo) in p])

        logfreqs = np.log([pos[p].median for p in freq_params])

        # Amplitude calibration model
        plt.sca(ax_amp)
        amp_params = np.sort([p for p in params if
                              '{0}_spcal_amp'.format(ifo) in p])
        if len(amp_params) > 0:
            amp = 100*np.column_stack([pos[p].samples for p in amp_params])
            plot_spline_pos(logfreqs, amp, color=color, level=level,
                            label="{0} (mean, {1}%)".format(ifo.upper(), int(level*100)))

        # Phase calibration model
        plt.sca(ax_phase)
        phase_params = np.sort([p for p in params if
                                '{0}_spcal_phase'.format(ifo) in p])
        if len(phase_params) > 0:
            phase = np.column_stack([pos[p].samples for p in phase_params])
            plot_spline_pos(logfreqs, phase, color=color, level=level,
                            label="{0} (mean, {1}%)".format(ifo.upper(), int(level*100)),
                            xform=spline_angle_xform)

    ax_amp.tick_params(labelsize=.75*font_size)
    ax_phase.tick_params(labelsize=.75*font_size)
    try:
        plt.legend(loc='upper right', prop={'size':.75*font_size}, framealpha=0.1)
    except:
        # Older matplotlib without the framealpha keyword.
        plt.legend(loc='upper right', prop={'size':.75*font_size})
    ax_amp.set_xscale('log')
    ax_phase.set_xscale('log')

    ax_phase.set_xlabel('Frequency (Hz)', fontsize=font_size)
    ax_amp.set_ylabel('Amplitude (%)', fontsize=font_size)
    ax_phase.set_ylabel('Phase (deg)', fontsize=font_size)

    outp = os.path.join(outpath, 'calibration.png')
    try:
        fig.tight_layout()
        fig.savefig(outp, bbox_inches='tight')
    except:
        fig.savefig(outp)
    plt.close(fig)
6830
6831
6832def plot_burst_waveform(pos=None,simburst=None,event=0,path=None,ifos=['H1','L1','V1']):
6833 from lalinference import SimBurstChooseFDWaveform,SimBurstChooseTDWaveform
6834 from lalinference import SimBurstImplementedFDApproximants,SimBurstImplementedTDApproximants
6835 from lal import CreateREAL8TimeSeries,CreateForwardREAL8FFTPlan,CreateTukeyREAL8Window,CreateCOMPLEX16FrequencySeries,DimensionlessUnit,REAL8TimeFreqFFT,CreateReverseREAL8FFTPlan
6836 from lal import LIGOTimeGPS
6837 import lalinference as lalinf
6838 from lal import ComputeDetAMResponse, GreenwichMeanSiderealTime, LIGOTimeGPS
6839
6840 from math import cos,sin,sqrt
6841 from igwn_ligolw import lsctables
6842 from igwn_ligolw import utils
6843 import os
6844 import numpy as np
6845 from numpy import arange,real,absolute,fabs,pi
6846 from matplotlib import pyplot as plt
6847 if path is None:
6848 path=os.getcwd()
6849 if event is None:
6850 event=0
6851 colors_inj={'H1':'r','L1':'g','V1':'m','I1':'b','J1':'y'}
6852 colors_rec={'H1':'k','L1':'k','V1':'k','I1':'k','J1':'k'}
6853 #import sim inspiral table content handler
6854 from igwn_ligolw import ligolw
6855 from igwn_ligolw import table
6856 class LIGOLWContentHandlerExtractSimBurstTable(ligolw.LIGOLWContentHandler):
6857 def __init__(self,document):
6858 ligolw.LIGOLWContentHandler.__init__(self,document)
6859 self.tabname=lsctables.SimBurstTable.tableName
6860 self.intable=False
6861 self.tableElementName=''
6862 def startElement(self,name,attrs):
6863 if attrs.has_key('Name') and table.Table.TableName(attrs['Name'])==self.tabname:
6864 self.tableElementName=name
6865 # Got the right table, let's see if it's the right event
6866 ligolw.LIGOLWContentHandler.startElement(self,name,attrs)
6867 self.intable=True
6868 elif self.intable: # We are in the correct table
6869 ligolw.LIGOLWContentHandler.startElement(self,name,attrs)
6870 def endElement(self,name):
6871 if self.intable: ligolw.LIGOLWContentHandler.endElement(self,name)
6872 if self.intable and name==self.tableElementName: self.intable=False
6873
6874 # time and freq data handling variables
6875 srate=4096.0
6876 seglen=10.
6877 length=srate*seglen # lenght of 10 secs, hardcoded.
6878 deltaT=1/srate
6879 deltaF = 1.0 / (length* deltaT);
6880
6881 # build window for FFT
6882 pad=0.4
6883 timeToFreqFFTPlan = CreateForwardREAL8FFTPlan(int(length), 1 );
6884 freqToTimeFFTPlan = CreateReverseREAL8FFTPlan(int(length), 1 );
6885 window=CreateTukeyREAL8Window(int(length),2.0*pad*srate/length);
6886 # A random GPS time to initialize arrays. Epoch will be overwritten with sensible times further down
6887 segStart=939936910.000
6888 strainFinj= CreateCOMPLEX16FrequencySeries("strainF",segStart,0.0,deltaF,DimensionlessUnit,int(length/2. +1));
6889 strainTinj=CreateREAL8TimeSeries("strainT",segStart,0.0,1.0/srate,DimensionlessUnit,int(length));
6890 strainFrec= CreateCOMPLEX16FrequencySeries("strainF",segStart,0.0,deltaF,DimensionlessUnit,int(length/2. +1));
6891 strainTrec=CreateREAL8TimeSeries("strainT",segStart,0.0,1.0/srate,DimensionlessUnit,int(length));
6892 GlobREAL8time=None
6893 f_min=25 # hardcoded default (may be changed below)
6894 f_ref=100 # hardcoded default (may be changed below)
6895 f_max=srate/2.0
6896
6897 plot_fmax=2048
6898 plot_fmin=0.01
6899 plot_tmin=1e11
6900 plot_tmax=-1e11
6901
6902 inj_strains=dict((i,{"T":{'x':None,'strain':None},"F":{'x':None,'strain':None}}) for i in ifos)
6903 rec_strains=dict((i,{"T":{'x':None,'strain':None},"F":{'x':None,'strain':None}}) for i in ifos)
6904
6905 inj_domain=None
6906 rec_domain=None
6907 font_size=26
6908 if simburst is not None:
6909 skip=0
6910 try:
6911 xmldoc = utils.load_filename(simburst,contenthandler=LIGOLWContentHandlerExtractSimBurstTable)
6912 tbl = lsctables.SimBurstTable.get_table(xmldoc)
6913 if event>0:
6914 tbl=tbl[event]
6915 else:
6916 tbl=tbl[0]
6917 except:
6918 print("Cannot read event %s from table %s. Won't plot injected waveform \n"%(event,simburst))
6919 skip=1
6920 if not skip:
6921 REAL8time=tbl.time_geocent_gps+1e-9*tbl.time_geocent_gps_ns
6922 GPStime=LIGOTimeGPS(REAL8time)
6923 GlobREAL8time=(REAL8time)
6924 strainTinj.epoch=LIGOTimeGPS(round(GlobREAL8time,0)-seglen/2.)
6925 strainFinj.epoch=LIGOTimeGPS(round(GlobREAL8time,0)-seglen/2.)
6926 f0=tbl.frequency
6927 q=tbl.q
6928 dur=tbl.duration
6929 hrss=tbl.hrss
6930 polar_e_angle=tbl.pol_ellipse_angle
6931 polar_e_ecc=tbl.pol_ellipse_e
6932
6933 BurstExtraParams=None
6934 wf=str(tbl.waveform)
6935
6936 injapproximant=lalinf.GetBurstApproximantFromString(wf)
6937 ra=tbl.ra
6938 dec=tbl.dec
6939 psi=tbl.psi
6940
6941 if SimBurstImplementedFDApproximants(injapproximant):
6942 inj_domain='F'
6943 [plus,cross]=SimBurstChooseFDWaveform(deltaF, deltaT, f0, q,dur, f_min, f_max,hrss,polar_e_angle ,polar_e_ecc,BurstExtraParams, injapproximant)
6944 elif SimBurstImplementedTDApproximants(injapproximant):
6945 inj_domain='T'
6946 [plus,cross]=SimBurstChooseTDWaveform(deltaT, f0, q,dur, f_min, f_max,hrss,polar_e_angle ,polar_e_ecc,BurstExtraParams, injapproximant)
6947 else:
6948 print("\nThe approximant %s doesn't seem to be recognized by lalinference!\n Skipping WF plots\n"%injapproximant)
6949 return None
6950
6951 for ifo in ifos:
6952 fp, fc = ComputeDetAMResponse(lal.cached_detector_by_prefix[ifo].response, ra, dec, psi, GreenwichMeanSiderealTime(REAL8time))
6953 if inj_domain=='T':
6954 # bin of ref time as seen in strainT
6955 tCinstrain=np.floor(REAL8time-float(strainTinj.epoch))/deltaT
6956 # bin of strainT where we need to start copying the WF over
6957 #tSinstrain=floor(tCinstrain-float(plus.data.length)/2.)+1
6958 tSinstrain=int( (REAL8time-fabs(float(plus.epoch)) - fabs(float(strainTinj.epoch)))/deltaT)
6959 rem=(REAL8time-fabs(float(plus.epoch)) - fabs(float(strainTinj.epoch)))/deltaT-tSinstrain
6960 # strain is a temporary container for this IFO strain.
6961 # Zero until tSinstrain
6962 for k in np.arange(tSinstrain):
6963 strainTinj.data.data[k]=0.0
6964 # then copy plus/cross over
6965 for k in np.arange(plus.data.length):
6966 strainTinj.data.data[k+tSinstrain]=((fp*plus.data.data[k]+fc*cross.data.data[k]))
6967 # Then zeros till the end (superfluous)
6968 for k in np.arange(strainTinj.data.length- (tSinstrain +plus.data.length)):
6969 strainTinj.data.data[k+tSinstrain+plus.data.length]=0.0
6970 for k in np.arange(strainTinj.data.length):
6971 strainTinj.data.data[k]*=window.data.data[k]
6972 np.savetxt('file.out',zip(np.array([strainTinj.epoch + j*deltaT for j in arange(strainTinj.data.length)]),np.array([strainTinj.data.data[j] for j in arange(strainTinj.data.length)])))
6973 # now copy in the dictionary
6974 inj_strains[ifo]["T"]['strain']=np.array([strainTinj.data.data[j] for j in arange(strainTinj.data.length)])
6975 inj_strains[ifo]["T"]['x']=np.array([strainTinj.epoch + j*deltaT for j in arange(strainTinj.data.length)])
6976 # Take the FFT
6977 for j in arange(strainFinj.data.length):
6978 strainFinj.data.data[j]=0.0
6979 REAL8TimeFreqFFT(strainFinj,strainTinj,timeToFreqFFTPlan);
6980 twopit=2.*np.pi*(rem*deltaT)
6981 for k in arange(strainFinj.data.length):
6982 re = cos(twopit*deltaF*k)
6983 im = -sin(twopit*deltaF*k)
6984 strainFinj.data.data[k]*= (re + 1j*im);
6985 # copy in the dictionary
6986 inj_strains[ifo]["F"]['strain']=np.array([strainFinj.data.data[k] for k in arange(int(strainFinj.data.length))])
6987 inj_strains[ifo]["F"]['x']=np.array([strainFinj.f0+ k*strainFinj.deltaF for k in arange(int(strainFinj.data.length))])
6988 elif inj_domain=='F':
6989 for k in np.arange(strainFinj.data.length):
6990 if k<plus.data.length:
6991 strainFinj.data.data[k]=((fp*plus.data.data[k]+fc*cross.data.data[k]))
6992 else:
6993 strainFinj.data.data[k]=0.0
6994 twopit=2.*np.pi*(REAL8time-float(strainFinj.epoch))
6995 for k in arange(strainFinj.data.length):
6996 re = cos(twopit*deltaF*k)
6997 im = -sin(twopit*deltaF*k)
6998 strainFinj.data.data[k]*= (re + 1j*im);
6999 # copy in the dictionary
7000 inj_strains[ifo]["F"]['strain']=np.array([strainFinj.data.data[k] for k in arange(int(strainFinj.data.length))])
7001 inj_strains[ifo]["F"]['x']=np.array([strainFinj.f0+ k*strainFinj.deltaF for k in arange(int(strainFinj.data.length))])
7002 #update xlimits for plot, go 6 sigmas left and right of f0
7003 # This should work for SineGaussians
7004 if f0 is not None and f0 is not np.nan:
7005 if q is not None and q is not np.nan:
7006 sigmaF=f0/q
7007 if f0-6.*sigmaF>plot_fmin:
7008 plot_fmin=f0-6.*sigmaF
7009 if f0+6.*sigmaF<plot_fmax:
7010 plot_fmax=f0+6.*sigmaF
7011 sigmaT=q/(2.*pi*f0)
7012 if REAL8time-6.*sigmaT<plot_tmin:
7013 plot_tmin=REAL8time-6.*sigmaT
7014 if REAL8time+6.*sigmaT>plot_tmax:
7015 plot_tmax=REAL8time+6.*sigmaT
7016 # Now handle gaussians. For gaussians f0 is nan (FD centered at f=0)
7017 if dur is not None and dur is not np.nan:
7018 sigmaF=1./sqrt(2.)/pi/dur
7019 if 0+6.*sigmaF<plot_fmax:
7020 plot_fmax=0+6.*sigmaF
7021 plot_fmin=0.0
7022 sigmaT=dur/sqrt(2.)
7023 if REAL8time-6.*sigmaT<plot_tmin:
7024 plot_tmin=REAL8time-6.*sigmaT
7025 if REAL8time+6.*sigmaT>plot_tmax:
7026 plot_tmax=REAL8time+6.*sigmaT
7027
7028
7029 if pos is not None:
7030
7031 # Select the maxP sample
7032 _,which=pos._posMap()
7033
7034 if 'time' in pos.names:
7035 REAL8time=pos['time'].samples[which][0]
7036 elif 'time_maxl' in pos.names:
7037 REAL8time=pos['time_maxl'].samples[which][0]
7038 elif 'time_mean' in pos.names:
7039 REAL8time=pos['time_mean'].samples[which][0]
7040 elif 'time_min' in pos.names and 'time_max' in pos.names:
7041 REAL8time=pos['time_min'].samples[which][0]+0.5*(pos['time_max'].samples[which][0]-pos['time_min'].samples[which][0])
7042 else:
7043 print("ERROR: could not find any time parameter in the posterior file. Not plotting the WF...\n")
7044 return None
7045
7046 # first check we have approx in posterior samples, otherwise skip
7047 skip=0
7048
7049 try:
7050 approximant=int(pos['LAL_APPROXIMANT'].samples[which][0])
7051 except:
7052 skip=1
7053 if skip==0:
7054 GPStime=LIGOTimeGPS(REAL8time)
7055 if GlobREAL8time is None:
7056 GlobREAL8time=REAL8time
7057 strainTrec.epoch=LIGOTimeGPS(round(GlobREAL8time,0)-seglen/2.)
7058 strainFrec.epoch=LIGOTimeGPS(round(GlobREAL8time,0)-seglen/2.)
7059 if "duration" in pos.names:
7060 dur=pos["duration"].samples[which][0]
7061 else:
7062 dur=np.nan
7063 if "quality" in pos.names:
7064 q=pos['quality'].samples[which][0]
7065 else:
7066 q=np.nan
7067 if 'frequency' in pos.names:
7068 f0=pos['frequency'].samples[which][0]
7069 else:
7070 f0=np.nan
7071 try:
7072 hrss=pos['hrss'].samples[which][0]
7073 except:
7074 hrss=np.exp(pos['loghrss'].samples[which][0])
7075 if np.isnan(q) and not np.isnan(dur):
7076 q=sqrt(2)*pi*dur
7077 alpha=None
7078 if 'alpha' in pos.names:
7079 alpha=pos['alpha'].samples[which][0]
7080 polar_e_angle=alpha
7081 polar_e_ecc=pos['polar_eccentricity'].samples[which][0]
7082 elif 'polar_ellipse_angle' in pos.names:
7083 polar_e_angle=pos['polar_ellipse_angle'].samples[which][0]
7084 polar_e_ecc=pos['polar_eccentricity'].samples[which][0]
7085
7086 BurstExtraParams=None
7087 #if alpha:
7088 # BurstExtraParams=lalsim.SimBurstCreateExtraParam("alpha",alpha)
7089
7090 if SimBurstImplementedFDApproximants(approximant):
7091 rec_domain='F'
7092 [plus,cross]=SimBurstChooseFDWaveform(deltaF, deltaT, f0, q,dur, f_min, f_max,hrss,polar_e_angle ,polar_e_ecc,BurstExtraParams, approximant)
7093 elif SimBurstImplementedTDApproximants(approximant):
7094 rec_domain='T'
7095 [plus,cross]=SimBurstChooseTDWaveform(deltaT, f0, q,dur, f_min, f_max,hrss,polar_e_angle ,polar_e_ecc,BurstExtraParams, approximant)
7096 else:
7097 print("The approximant %s doesn't seem to be recognized by lalinference!\n Skipping WF plots\n"%approximant)
7098 return None
7099 ra=pos['ra'].samples[which][0]
7100 dec=pos['dec'].samples[which][0]
7101 psi=pos['psi'].samples[which][0]
7102 fs={}
7103 for ifo in ifos:
7104 fp, fc = ComputeDetAMResponse(lal.cached_detector_by_prefix[ifo].response, ra, dec, psi, GreenwichMeanSiderealTime(REAL8time))
7105 if rec_domain=='T':
7106 # bin of ref time as seen in strainT
7107 tCinstrain=np.floor(REAL8time-float(strainTrec.epoch))/deltaT
7108 # bin of strainT where we need to start copying the WF over
7109 tSinstrain=int( (REAL8time-fabs(float(plus.epoch)) - fabs(float(strainTrec.epoch)))/deltaT)
7110 #tSinstrain=floor(tCinstrain-float(plus.data.length)/2.)+1
7111 #reminder for fractions of bin, will be added back in the FD WF
7112 rem=(REAL8time-fabs(float(plus.epoch)) - fabs(float(strainTrec.epoch)))/deltaT-tSinstrain
7113
7114 # strain is a temporary container for this IFO strain.
7115 # Zero until tSinstrain
7116 for k in np.arange(tSinstrain):
7117 strainTrec.data.data[k]=0.0
7118 # then copy plus/cross over
7119 for k in np.arange(plus.data.length):
7120 strainTrec.data.data[k+tSinstrain]=((fp*plus.data.data[k]+fc*cross.data.data[k]))
7121 # Then zeros till the end (superfluous)
7122 for k in np.arange(strainTrec.data.length- (tSinstrain +plus.data.length)):
7123 strainTrec.data.data[k+tSinstrain+plus.data.length]=0.0
7124 for k in np.arange(strainTrec.data.length):
7125 strainTrec.data.data[k]*=window.data.data[k]
7126 # now copy in the dictionary
7127 rec_strains[ifo]["T"]['strain']=np.array([strainTrec.data.data[j] for j in arange(strainTrec.data.length)])
7128 rec_strains[ifo]["T"]['x']=np.array([strainTrec.epoch + j*deltaT for j in arange(strainTrec.data.length)])
7129 # Take the FFT
7130 for j in arange(strainFrec.data.length):
7131 strainFrec.data.data[j]=0.0
7132 REAL8TimeFreqFFT(strainFrec,strainTrec,timeToFreqFFTPlan);
7133 twopit=2.*np.pi*(rem*deltaT)
7134 for k in arange(strainFrec.data.length):
7135 re = cos(twopit*deltaF*k)
7136 im = -sin(twopit*deltaF*k)
7137 strainFrec.data.data[k]*= (re + 1j*im);
7138 # copy in the dictionary
7139 rec_strains[ifo]["F"]['strain']=np.array([strainFrec.data.data[k] for k in arange(int(strainFrec.data.length))])
7140 rec_strains[ifo]["F"]['x']=np.array([strainFrec.f0+ k*strainFrec.deltaF for k in arange(int(strainFrec.data.length))])
7141 elif rec_domain=='F':
7142 for k in np.arange(strainFrec.data.length):
7143 if k<plus.data.length:
7144 strainFrec.data.data[k]=((fp*plus.data.data[k]+fc*cross.data.data[k]))
7145 else:
7146 strainFrec.data.data[k]=0.0
7147 twopit=2.*np.pi*(REAL8time-float(strainFrec.epoch))
7148 for k in arange(strainFrec.data.length):
7149 re = cos(twopit*deltaF*k)
7150 im = -sin(twopit*deltaF*k)
7151 strainFrec.data.data[k]*= (re + 1j*im);
7152 # copy in the dictionary
7153 rec_strains[ifo]["F"]['strain']=np.array([strainFrec.data.data[k] for k in arange(int(strainFrec.data.length))])
7154 rec_strains[ifo]["F"]['x']=np.array([strainFrec.f0+ k*strainFrec.deltaF for k in arange(int(strainFrec.data.length))])
7155 #update xlimits for plot, go 6 sigmas left and right of f0
7156 # This should work for SineGaussians
7157 if f0 is not None and f0 is not np.nan:
7158 if q is not None and q is not np.nan:
7159 sigmaF=f0/q
7160 if f0-6.*sigmaF>plot_fmin:
7161 plot_fmin=f0-6.*sigmaF
7162 if f0+6.*sigmaF<plot_fmax:
7163 plot_fmax=f0+6.*sigmaF
7164 sigmaT=q/(2.*pi*f0)
7165 if REAL8time-6.*sigmaT<plot_tmin:
7166 plot_tmin=REAL8time-6.*sigmaT
7167 if REAL8time+6.*sigmaT>plot_tmax:
7168 plot_tmax=REAL8time+6.*sigmaT
7169 # Now handle gaussians. For gaussians f0 is nan (FD centered at f=0)
7170 if dur is not None and dur is not np.nan:
7171 sigmaF=1./sqrt(2.)/pi/dur
7172 if 0+6.*sigmaF<plot_fmax:
7173 plot_fmax=0+6.*sigmaF
7174 plot_fmin=0.0
7175 sigmaT=dur/sqrt(2.)
7176 if REAL8time-6.*sigmaT<plot_tmin:
7177 plot_tmin=REAL8time-6.*sigmaT
7178 if REAL8time+6.*sigmaT>plot_tmax:
7179 plot_tmax=REAL8time+6.*sigmaT
7180
7181 myfig=plt.figure(1,figsize=(10,7))
7182
7183 rows=len(ifos)
7184 cols=2
7185
7186 #this variables decide which domain will be plotted on the left column of the plot.
7187 # only plot Time domain if both injections and recovery are TD
7188 global_domain="F"
7189 if rec_domain is not None and inj_domain is not None:
7190 if rec_domain=="T" and inj_domain=="T":
7191 global_domain="T"
7192 elif rec_domain is not None:
7193 if rec_domain=="T":
7194 global_domain="T"
7195 elif inj_domain is not None:
7196 if inj_domain=="T":
7197 global_domain="T"
7198
7199 A,axes=plt.subplots(nrows=rows,ncols=cols,sharex=False,sharey=False)
7200 plt.setp(A,figwidth=10,figheight=7)
7201 for (r,i) in zip(np.arange(rows),ifos):
7202 for c in np.arange(cols):
7203 ax=axes[r]
7204 if type(ax)==np.ndarray:
7205 ax=ax[c]
7206 else:
7207 ax=axes[c]
7208 if rec_strains[i]["T"]['strain'] is not None or rec_strains[i]["F"]['strain'] is not None:
7209 if c==0:
7210 if global_domain=="T":
7211 ax.plot(rec_strains[i]["T"]['x'],rec_strains[i]["T"]['strain'],colors_rec[i],label='%s maP'%i,linewidth=5)
7212 ax.set_xlim([plot_tmin,plot_tmax])
7213 #ax.vlines(GlobREAL8time,0.9*min(rec_strains[i]["T"]['strain']),1.1*max(rec_strains[i]["T"]['strain']),'k')
7214 else:
7215 data=rec_strains[i]["F"]['strain']
7216 f=rec_strains[i]["F"]['x']
7217 mask=np.logical_and(f>=plot_fmin,f<=plot_fmax)
7218 ys=data
7219 ax.plot(f[mask],real(ys[mask]),'-',color=colors_rec[i],label='%s maP'%i,linewidth=5)
7220 ax.set_xlim([plot_fmin,plot_fmax])
7221 else:
7222 data=rec_strains[i]["F"]['strain']
7223 f=rec_strains[i]["F"]['x']
7224 mask=np.logical_and(f>=plot_fmin,f<=plot_fmax)
7225 ys=data
7226 ax.loglog(f[mask],absolute(ys[mask]),'--',color=colors_rec[i],linewidth=5)
7227 ax.grid(True,which='both')
7228 ax.set_xlim([plot_fmin,plot_fmax])
7229 if inj_strains[i]["T"]['strain'] is not None or inj_strains[i]["F"]['strain'] is not None:
7230 if c==0:
7231 if global_domain=="T":
7232 ax.plot(inj_strains[i]["T"]['x'],inj_strains[i]["T"]['strain'],colors_inj[i],label='%s inj'%i,linewidth=2)
7233 ax.set_xlim([plot_tmin,plot_tmax])
7234 else:
7235 data=inj_strains[i]["F"]['strain']
7236 f=inj_strains[i]["F"]['x']
7237 mask=np.logical_and(f>=plot_fmin,f<=plot_fmax)
7238 ys=data
7239 ax.plot(f[mask],real(ys[mask]),'-',color=colors_inj[i],label='%s inj'%i,linewidth=2)
7240 ax.set_xlim([plot_fmin,plot_fmax])
7241 else:
7242 data=inj_strains[i]["F"]['strain']
7243 f=inj_strains[i]["F"]['x']
7244 mask=np.logical_and(f>=plot_fmin,f<=plot_fmax)
7245 ys=data
7246 ax.loglog(f[mask],absolute(ys[mask]),'--',color=colors_inj[i],linewidth=2)
7247 ax.grid(True,which='both')
7248 ax.set_xlim([plot_fmin,plot_fmax])
7249 if r==0:
7250 if c==0:
7251 if global_domain=="T":
7252 ax.set_title(r"$h(t)$",fontsize=font_size)
7253 else:
7254 ax.set_title(r"$\Re[h(f)]$",fontsize=font_size)
7255 else:
7256 ax.set_title(r"$|h(f)|$",fontsize=font_size)
7257 elif r==rows-1:
7258 if c==0:
7259 if global_domain=="T":
7260 ax.set_xlabel("time [s]",fontsize=font_size)
7261 else:
7262 ax.set_xlabel("frequency [Hz]",fontsize=font_size)
7263 else:
7264 ax.set_xlabel("frequency [Hz]",fontsize=font_size)
7265
7266 ax.legend(loc='best')
7267 ax.grid(True)
7268
7269 #ax.tight_layout()
7270 A.savefig(os.path.join(path,'WF_DetFrame.png'),bbox_inches='tight')
7271 return inj_strains, rec_strains
7272
def make_1d_table(html,legend,label,pos,pars,noacf,GreedyRes,onepdfdir,sampsdir,savepdfs,greedy,analyticLikelihood,nDownsample):
    """
    Build the '1D marginal posterior PDFs' section of an html report.

    For each parameter in pars that is present in the posterior pos, compute
    1-d credible regions (greedy binning or 2-step KDE), save a histogram/KDE
    plot, a raw-samples plot and (unless noacf) an autocorrelation plot, and
    append a table row referencing the images to the html page.

    @param html: htmlPage object the collapsible section is added to.
    @param legend: legend text for the collapsible section.
    @param label: label used in the section title and the html table id.
    @param pos: Posterior object holding the samples.
    @param pars: list of parameter names to process (lower-cased internally).
    @param noacf: if True, skip autocorrelation plots.
    @param GreedyRes: dict {parameter name: bin size} for greedy binning.
    @param onepdfdir: output directory for the 1-d PDF plots.
    @param sampsdir: output directory for the samples/ACF plots.
    @param savepdfs: if True, also save each figure in pdf format.
    @param greedy: if True use greedy binning, else 2-step KDE credible regions.
    @param analyticLikelihood: optional object supplying analytic pdf/cdf curves.
    @param nDownsample: downsampling setting from upstream; controls whether the
        ACL is annotated on the ACF plots (None means not downsampled).
    @return: dict {parameter name: credible-interval results}.
    """

    from numpy import unique, sort
    global confidenceLevels
    confidence_levels=confidenceLevels

    out={}
    # Nothing to do if no parameters were requested, or if none of the
    # requested parameters actually appear in the posterior.
    if pars==[]:
        return out
    if set(pos.names)-set(pars)==set(pos.names):
        return out

    #2D plots list
    tabid='onedmargtable_'+label.lower()
    html_ompdf=html.add_collapse_section('1D marginal posterior PDFs (%s)'%label,legend=legend,innertable_id=tabid)
    #Table matter
    if not noacf:
        html_ompdf_write= '<table id="%s"><tr><th>Histogram and Kernel Density Estimate</th><th>Samples used</th><th>Autocorrelation</th></tr>'%tabid
    else:
        html_ompdf_write= '<table id="%s"><tr><th>Histogram and Kernel Density Estimate</th><th>Samples used</th></tr>'%tabid

    # Work out the MCMC cycle spacing (per chain if several chains are
    # present) so autocorrelation lengths can be expressed in samples.
    Nskip=0
    if 'chain' in pos.names:
        data,header=pos.samples()
        par_index=pos.names.index('cycle')
        chain_index=pos.names.index("chain")
        chains=unique(pos["chain"].samples)
        chainCycles = [sort(data[ data[:,chain_index] == chain, par_index ]) for chain in chains]
        chainNcycles = []
        chainNskips = []
        for cycles in chainCycles:
            if len(cycles) > 1:
                chainNcycles.append(cycles[-1] - cycles[0])
                chainNskips.append(cycles[1] - cycles[0])
            else:
                chainNcycles.append(1)
                chainNskips.append(1)
    elif 'cycle' in pos.names:
        cycles = sort(pos['cycle'].samples)
        if len(cycles) > 1:
            Ncycles = cycles[-1]-cycles[0]
            Nskip = cycles[1]-cycles[0]
        else:
            Ncycles = 1
            Nskip = 1

    # printed flags that the binning-method banner has been emitted once.
    printed=0
    for par_name in pars:
        par_name=par_name.lower()
        try:
            # par_name is already lower-cased above; just probe for presence.
            pos[par_name]
        except KeyError:
            #print "No input chain for %s, skipping binning."%par_name
            continue
        try:
            par_bin=GreedyRes[par_name]
        except KeyError:
            print("Bin size is not set for %s, skipping binning."%par_name)
            continue

        #print "Binning %s to determine confidence levels ..."%par_name
        binParams={par_name:par_bin}
        # Initialise both so they are always bound, even when the KDE branch
        # returns no injection statistics.
        injectionconfidence=None
        injection_area=None
        if greedy:
            if printed==0:
                print("Using greedy 1-d binning credible regions\n")
                printed=1
            toppoints,injectionconfidence,reses,injection_area,cl_intervals=greedy_bin_one_param(pos,binParams,confidence_levels)
        else:
            if printed==0:
                print("Using 2-step KDE 1-d credible regions\n")
                printed=1
            if pos[par_name].injval is None:
                injCoords=None
            else:
                injCoords=[pos[par_name].injval]
            _,reses,injstats=kdtree_bin2Step(pos,[par_name],confidence_levels,injCoords=injCoords)
            if injstats is not None:
                injectionconfidence=injstats[3]
                injection_area=injstats[4]
        #Generate 1D histogram/kde plots
        print("Generating 1D plot for %s."%par_name)
        out[par_name]=reses
        #Get analytic description if given
        pdf=cdf=None
        if analyticLikelihood:
            pdf = analyticLikelihood.pdf(par_name)
            cdf = analyticLikelihood.cdf(par_name)

        oneDPDFParams={par_name:50}
        try:
            rbins,plotFig=plot_one_param_pdf(pos,oneDPDFParams,pdf,cdf,plotkde=False)
        except Exception as exc:
            # Best effort: a failed plot for one parameter must not abort the
            # whole table.
            warnings.warn(
                f"failed to produce plot for {par_name}: "
                f"[{type(exc).__name__}] {exc}",
            )
            continue

        figname=par_name+'.png'
        oneDplotPath=os.path.join(onepdfdir,figname)
        plotFig.savefig(oneDplotPath)
        if(savepdfs): plotFig.savefig(os.path.join(onepdfdir,par_name+'.pdf'))
        plt.close(plotFig)

        if rbins:
            print("r of injected value of %s (bins) = %f"%(par_name, rbins))

        ##Produce plot of raw samples
        myfig=plt.figure(figsize=(4,3.5),dpi=200)
        pos_samps=pos[par_name].samples
        if not ("chain" in pos.names):
            # If there is not a parameter named "chain" in the
            # posterior, then just produce a plot of the samples.
            plt.plot(pos_samps,'k.', markersize=5, alpha=0.5, linewidth=0.0, figure=myfig)
            maxLen=len(pos_samps)
        else:
            # If there is a parameter named "chain", then produce a
            # plot of the various chains in different colors, with
            # smaller dots.
            data,header=pos.samples()
            par_index=pos.names.index(par_name)
            chain_index=pos.names.index("chain")
            chains=unique(pos["chain"].samples)
            chainData=[data[ data[:,chain_index] == chain, par_index ] for chain in chains]
            chainDataRanges=[range(len(cd)) for cd in chainData]
            maxLen=max([len(cd) for cd in chainData])
            for rng, chain_samps in zip(chainDataRanges, chainData):
                plt.plot(rng, chain_samps, marker='.', markersize=1, alpha=0.5, linewidth=0.0,figure=myfig)
            plt.title("Gelman-Rubin R = %g"%(pos.gelman_rubin(par_name)))

            #dataPairs=[ [rng, data] for (rng,data) in zip(chainDataRanges, chainData)]
            #flattenedData=[ item for pair in dataPairs for item in pair ]
            #maxLen=max([len(data) for data in flattenedData])
            #plt.plot(array(flattenedData),marker=',',linewidth=0.0,figure=myfig)


        injpar=pos[par_name].injval

        if injpar is not None:
            # Allow injection to be 5% outside the posterior plot
            minrange=min(pos_samps)-0.05*(max(pos_samps)-min(pos_samps))
            maxrange=max(pos_samps)+0.05*(max(pos_samps)-min(pos_samps))
            if minrange<injpar and maxrange>injpar:
                plt.axhline(injpar, color='r', linestyle='-.',linewidth=4)
        myfig.savefig(os.path.join(sampsdir,figname.replace('.png','_samps.png')))
        if(savepdfs): myfig.savefig(os.path.join(sampsdir,figname.replace('.png','_samps.pdf')))
        plt.close(myfig)
        acfail=0
        if not (noacf):
            acffig=plt.figure(figsize=(4,3.5),dpi=200)
            if not ("chain" in pos.names):
                data=pos_samps[:,0]
                try:
                    (Neff, acl, acf) = effectiveSampleSize(data, Nskip)
                    lines=plt.plot(acf, 'k.', marker='.', markersize=1, alpha=0.5, linewidth=0.0, figure=acffig)
                    # Give ACL info if not already downsampled according to it
                    if nDownsample is None:
                        plt.title('Autocorrelation Function')
                    elif 'cycle' in pos.names:
                        last_color = lines[-1].get_color()
                        plt.axvline(acl/Nskip, linestyle='-.', color=last_color)
                        plt.title('ACL = %i N = %i'%(acl,Neff))
                    acffig.savefig(os.path.join(sampsdir,figname.replace('.png','_acf.png')))
                    if(savepdfs): acffig.savefig(os.path.join(sampsdir,figname.replace('.png','_acf.pdf')))
                    plt.close(acffig)
                except Exception:
                    # Best effort: record the failure and move on rather than
                    # aborting the whole table. (Do not trap SystemExit or
                    # KeyboardInterrupt.)
                    acfail=1
            else:
                try:
                    acls = []
                    Nsamps = 0.0
                    # Only chainData and chainNskips are needed per chain;
                    # fresh names avoid shadowing the outer Nskip/data.
                    for chain_samps, chain_Nskip in zip(chainData, chainNskips):
                        (Neff, acl, acf) = effectiveSampleSize(chain_samps, chain_Nskip)
                        acls.append(acl)
                        Nsamps += Neff
                        lines=plt.plot(acf,'k.', marker='.', markersize=1, alpha=0.5, linewidth=0.0, figure=acffig)
                        # Give ACL info if not already downsampled according to it
                        if nDownsample is not None:
                            last_color = lines[-1].get_color()
                            plt.axvline(acl/chain_Nskip, linestyle='-.', color=last_color)
                    if nDownsample is None:
                        plt.title('Autocorrelation Function')
                    else:
                        plt.title('ACL = %i N = %i'%(max(acls),Nsamps))
                    acffig.savefig(os.path.join(sampsdir,figname.replace('.png','_acf.png')))
                    if(savepdfs): acffig.savefig(os.path.join(sampsdir,figname.replace('.png','_acf.pdf')))
                    plt.close(acffig)
                except Exception:
                    # Best effort: see note above.
                    acfail=1

        if not noacf:
            if not acfail:
                acfhtml='<td width="30%"><img width="100%" src="1Dsamps/'+figname.replace('.png', '_acf.png')+'"/></td>'
            else:
                acfhtml='<td>ACF generation failed!</td>'
            html_ompdf_write+='<tr><td width="30%"><img width="100%" src="1Dpdf/'+figname+'"/></td><td width="30%"><img width="100%" src="1Dsamps/'+figname.replace('.png','_samps.png')+'"/></td>'+acfhtml+'</tr>'
        else:
            html_ompdf_write+='<tr><td width="30%"><img width="100%" src="1Dpdf/'+figname+'"/></td><td width="30%"><img width="100%" src="1Dsamps/'+figname.replace('.png','_samps.png')+'"/></td></tr>'

    html_ompdf_write+='</table>'
    html_ompdf.write(html_ompdf_write)

    return out
#define max(a, b)
Return analytic likelihood values.
def names(self)
Return list of parameter names described by analytic likelihood function.
def __init__(self, covariance_matrix_files, mean_vector_files)
Prepare analytic likelihood for the given parameters.
def cdf(self, param)
Return PDF function for parameter.
def pdf(self, param)
Return PDF function for parameter.
Data structure for a table of posterior samples .
def __init__(self, commonResultsFormatData, SimBurstTableEntry=None, injFref=None, SnglBurstList=None, name=None, description=None)
Constructor.
def __init__(self, accuracy='auto')
Dec tick locations with some intelligence.
def __init__(self, min=-pi_constant/2.0, max=pi_constant/2.0)
object to store the structure of a kd tree
def setSplit(self, dimension, value)
def setImportance(self, sampleNumber, volume)
def __init__(self, bounding_box, left_child=None, right_child=None)
def search(self, coordinates)
takes a set of coordinates and searches down through the tree until it gets to a box with less than ...
def integrate(self, f, boxing=64)
Returns the integral of f(objects) over the tree.
def objects(self)
Returns the objects in the tree.
def __init__(self, objects)
Construct a kD-tree from a sequence of objects.
def bounds(self)
Returns the coordinates of the lower-left and upper-right corners of the bounding box for this tree: ...
def volume(self)
Returns the volume of the bounding box of the tree.
def right(self)
Returns the right tree.
def __iter__(self)
Iterator over all the objects contained in the tree.
def left(self)
Returns the left tree.
def operate(self, f, g, boxing=64)
Operates on tree nodes exceeding boxing parameter depth.
def split_dim(self)
Returns the dimension along which this level of the kD-tree splits.
A kD-tree suitable for splitting parameter spaces and counting hypervolumes.
def integrate(self, f, boxing=64)
Returns the integral of f(objects) over the tree.
def fillNewTree(self, boxing=64, isArea=False)
copies tree structure, but with KDSkeleton as the new nodes.
def __iter__(self)
Iterator over all the objects contained in the tree.
def left(self)
Returns the left tree.
def objects(self)
Returns the objects in the tree.
def split_dim(self)
Returns the dimension along which this level of the kD-tree splits.
def right(self)
Returns the right tree.
def volume(self)
Returns the volume of the bounding box of the tree.
def bounds(self)
Returns the coordinates of the lower-left and upper-right corners of the bounding box for this tree: ...
def search(self, coordinates, boxing=64)
takes a set of coordinates and searches down through the tree until it gets to a box with less than ...
def __init__(self, objects, boundingbox, dims=0)
Construct a kD-tree from a sequence of objects.
def operate(self, f, g, boxing=64)
Operates on tree nodes exceeding boxing parameter depth.
A parser for the output of Bayesian parameter estimation codes.
def _find_ndownsample(self, files, logPthreshold, fixedBurnins, nDownsample)
def _common_to_pos(self, infile, info=[None, None])
def _hdf5_to_pos(self, infile, fixedBurnins=None, deltaLogP=None, nDownsample=None, tablename=None, **kwargs)
def _hdf5_to_table(self, infile, deltaLogP=None, fixedBurnin=None, nDownsample=None, multiple_chains=False, tablename=None, **kwargs)
def _infmcmc_to_pos(self, files, outdir=None, deltaLogP=None, fixedBurnins=None, nDownsample=None, oldMassConvention=False, **kwargs)
def _infmcmc_output_posterior_samples(self, files, runfile, outfile, logPThreshold, fixedBurnins, nskips=None, oldMassConvention=False)
def parse(self, files, **kwargs)
Parse files.
def _hdf5s_to_pos(self, infiles, fixedBurnins=None, deltaLogP=None, nDownsample=None, tablename=None, **kwargs)
def _ns_to_pos(self, files, Nlive=None, Npost=None, posfilename='posterior_samples.dat')
Data structure for a table of posterior samples .
def stdevs(self)
Return dict {paramName:paramStandardDeviation} .
def maxP(self)
Return the maximum a posteriori probability and the corresponding set of parameters.
def set_triggers(self, triggers)
Set the trigger values of the parameters.
def __init__(self, commonResultsFormatData, SimInspiralTableEntry=None, inj_spin_frame='OrbitalL', injFref=100, SnglInspiralList=None, name=None, description=None)
Constructor.
def bySample(self)
Generate a forward iterator over the list of samples corresponding to the data stored within the Post...
def __len__(self)
Container method.
def __iter__(self)
Container method.
def extend_posterior(self)
Add some useful derived parameters (such as tilt angles, time delays, etc) in the Posterior object.
def delete_NaN_entries(self, param_list)
Remove samples containing NaN in request params.
def name(self)
Return qualified string containing the 'name' of the Posterior instance.
def gelman_rubin(self, pname)
Returns an approximation to the Gelman-Rubin statistic (see Gelman, A.
def di_evidence(self, boxing=64)
Returns the log of the direct-integration evidence for the posterior samples.
def __getitem__(self, key)
Container method .
def _average_posterior(self, samples, post_name)
def _inj_spins(self, inj, frame='OrbitalL')
def forward(self)
Generate a forward iterator (in sense of list of names) over Posterior with name,one_d_pos.
def triggers(self)
Return the trigger values .
def append(self, one_d_posterior)
Container method.
def description(self)
Return qualified string containing a 'description' of the Posterior instance.
def bootstrap(self)
Returns a new Posterior object that contains a bootstrap sample of self.
def means(self)
Return dict {paramName:paramMean} .
def _average_posterior_like_prior(self, samples, logl_name, prior_name, log_bias=0)
def delete_samples_by_idx(self, samples)
Remove samples from all OneDPosteriors.
def maxL(self)
Return the maximum likelihood probability and the corresponding set of parameters.
def injection(self)
Return the injected values.
def healpix_map(self, resol, nest=True)
Returns a healpix map in the pixel ordering that represents the posterior density (per square degree)...
def _print_table_row(self, name, entries)
def _total_incl_restarts(self, samples)
def __str__(self)
Define a string representation of the Posterior class ; returns a html formatted table of various pro...
def DIC(self)
Returns the Deviance Information Criterion estimated from the posterior samples.
def write_to_file(self, fname)
Dump the posterior table to a file in the 'common format'.
def names(self)
Return list of parameter names.
def samples(self)
Return an (M,N) numpy.array of posterior samples; M = len(self); N = dim(self) .
def pop(self, param_name)
Container method.
def append_mapping(self, new_param_names, func, post_names)
Append posteriors pos1,pos2,...=func(post_names)
def elliptical_subregion_evidence(self)
Returns an approximation to the log(evidence) obtained by fitting an ellipse around the highest-poste...
def medians(self)
Return dict {paramName:paramMedian} .
def longest_chain_cycles(self)
Returns the number of cycles in the longest chain.
def dim(self)
Return number of parameters.
def set_injection(self, injection)
Set the injected values of the parameters.
def harmonic_mean_evidence(self)
Returns the log of the harmonic mean evidence for the set of posterior samples.
A data structure representing one parameter in a chain of posterior samples.
def name(self)
Return the string literal name of the parameter.
def median(self)
Return the median value for the marginal PDF on the parameter.
def delete_samples_by_idx(self, samples)
Remove samples from posterior, analogous to numpy.delete but operates in place.
def injval(self)
Return the injected value set at construction .
def set_trigvals(self, new_trigvals)
Set the trigger values of the parameter.
def __len__(self)
Container method.
def __init__(self, name, posterior_samples, injected_value=None, injFref=None, trigger_values=None, prior=None)
Create an instance of PosteriorOneDPDF based on a table of posterior_samples.
def stacc(self)
Return the 'standard accuracy statistic' (stacc) of the marginal posterior of the parameter.
def set_injval(self, new_injval)
Set the injected/real value of the parameter.
def trigvals(self)
Return the trigger values set at construction.
def mean(self)
Return the arithmetic mean for the marginal PDF on the parameter.
def __getitem__(self, idx)
Container method .
def prob_interval(self, intervals)
Evaluate probability intervals.
def KL(self)
Returns the KL divergence between the prior and the posterior.
def gaussian_kde(self)
Return a SciPy gaussian_kde (representing a Gaussian KDE) of the samples.
def stdev(self)
Return the standard deviation of the marginal PDF on the parameter.
def samples(self)
Return a 1D numpy.array of the samples.
A single parameter sample object, suitable for inclusion in a kD-tree.
def __getitem__(self, key)
Return the element with the corresponding name.
def coord(self)
Return the coordinates for the parameter sample.
def __init__(self, sample_array, headers, coord_names)
Given the sample array, headers for the values, and the names of the desired coordinates,...
def __init__(self, accuracy='auto')
RA tick locations with some intelligence.
def __init__(self, min=0.0, max=2.0 *pi_constant)
def start(self, tag, attrib)
A base class for representing web content using ElementTree .
def toprettyxml(self)
Return a pretty-printed XML string of the htmlPage.
def insert_row(self, tab, label=None)
Insert row in table tab.
def insert_td(self, row, td, label=None, legend=None)
Insert cell td into row row.
def __init__(self, tag, attrib=None, parent=None)
def a(self, url, linktext)
Represents a block of html fitting within a htmlPage.
def __init__(self, section_name, htmlElement=None, table_id=None, start_closed=True)
A concrete class for generating an XHTML(1) document.
def add_section(self, section_name, legend=None)
def add_collapse_section(self, section_name, legend=None, innertable_id=None, start_closed=True)
Create a section embedded into a table that can be collapsed with a button.
def add_section_to_element(self, section_name, parent)
Create a section which is not appended to the body of html, but to the parent Element.
def __init__(self, title=None, css=None, javascript=None, toc=False)
Represents a block of html fitting within a htmlPage.
def __init__(self, section_name, htmlElement=None, blank=False)
static const INT4 a
type
def plot_two_param_greedy_bins_hist(posterior, greedy2Params, confidence_levels)
Histograms of the ranked pixels produced by the 2-parameter greedy binning algorithm coloured by their...
def cred_interval(x, cl=.9, lower=True)
Return location of lower or upper confidence levels Args: x: List of samples.
def histogram2D(posterior, greedy2Params, confidence_levels)
Returns a 2D histogram and edges for the two parameters passed in greedy2Params, plus the actual disc...
def plot_spline_pos(logf, ys, nf=100, level=0.9, color='k', label=None, xform=None)
Plot calibration posterior estimates for a spline model in log space.
def lambda_a(redshift, nonGR_alpha, lambda_eff, distance)
Converting from the effective wavelength-like parameter to lambda_A: lambda_A = lambda_{eff}*(D_alpha...
def spline_angle_xform(delta_psi)
Returns the angle in degrees corresponding to the spline calibration parameters delta_psi.
def q2eta(q)
Utility function for converting q to eta.
def plot_one_param_pdf(posterior, plot1DParams, analyticPDF=None, analyticCDF=None, plotkde=False)
Plots a 1D histogram and (gaussian) kernel density estimate of the distribution of posterior samples ...
def det_end_time(ifo_prefix, inj)
def autocorrelation(series)
Returns an estimate of the autocorrelation function of a given series.
def array_ang_sep(vec1, vec2)
Find angles between vectors in rows of numpy arrays.
def physical2radiationFrame(theta_jn, phi_jl, tilt1, tilt2, phi12, a1, a2, m1, m2, fref, phiref)
changes for testing Lorentz violations made till here
def plot_sky_map(hpmap, outdir, inj=None, nest=True)
Plots a sky map from a healpix map, optionally including an injected position.
def mc2q(mc, eta)
Utility function for converting mchirp,eta to new mass ratio q (m2/m1).
def kdtree_bin2Step(posterior, coord_names, confidence_levels, initial_boundingbox=None, samples_per_bin=10, injCoords=None, alternate=False, fraction=0.5, skyCoords=False)
input: posterior class instance, list of confidence levels, optional choice of initial parameter space...
def plot_calibration_pos(pos, level=.9, outpath=None)
def orbital_momentum_mag(fref, m1, m2, eta)
def plot_burst_waveform(pos=None, simburst=None, event=0, path=None, ifos=['H1', 'L1', 'V1'])
def plot_two_param_kde(posterior, plot2DkdeParams)
Plots a 2D kernel density estimate of the 2-parameter marginal posterior.
def make_1d_table(html, legend, label, pos, pars, noacf, GreedyRes, onepdfdir, sampsdir, savepdfs, greedy, analyticLikelihood, nDownsample)
def plot_label(param)
A lookup table for plot labels.
def effectiveSampleSize(samples, Nskip=1)
Compute the effective sample size, calculating the ACL using only the second half of the samples to a...
def random_split(items, fraction)
def plot_waveform(pos=None, siminspiral=None, event=0, path=None, ifos=['H1', 'L1', 'V1'])
def getDecString(radians, accuracy='auto')
def plot_psd(psd_files, outpath=None, f_min=30.)
def autocorrelation_length_estimate(series, acf=None, M=5, K=2)
Attempts to find a self-consistent estimate of the autocorrelation length of a given series.
def contigious_interval_one_param(posterior, contInt1Params, confidence_levels)
Calculates the smallest contiguous 1-parameter confidence interval for a set of given confidence leve...
def as_array(table)
Workaround for missing astropy.table.Table.as_array method, which was added in Astropy 1....
def addSample(tree, coordinates)
def cart2sph(x, y, z)
Utility function to convert cartesian coords to r,theta,phi.
def vo_nest2pos(nsresource, Nlive=None)
Parse a VO Table RESOURCE containing nested sampling output and return a VOTable TABLE element with p...
def find_ndownsample(samples, nDownsample)
Given a list of files, threshold value, and a desired number of outputs posterior samples,...
def skymap_inj_pvalue(hpmap, inj, nest=True)
Returns the greedy p-value estimate for the given injection.
def symm_tidal_params(lambda1, lambda2, q)
Calculate best tidal parameters [Eqs.
def kdtree_bin_sky_area(posterior, confidence_levels, samples_per_bin=10)
takes samples and applies a KDTree to them to return confidence levels returns confidence_intervals -...
def getRAString(radians, accuracy='auto')
def kdtree_bin_sky_volume(posterior, confidence_levels)
def plot_one_param_pdf_kde(fig, onedpos)
def q2ms(mc, q)
Utility function for converting mchirp,q to component masses.
def skyArea(bounds)
functions used in 2-stage kdtree
def amplitudeMeasure(redshift, nonGR_alpha, lambda_eff, distance)
Converting to Lorentz violating parameter "A" in dispersion relation from lambda_A: A = (lambda_A/h)^...
def plot_two_param_greedy_bins_contourf(posteriors_by_name, greedy2Params, confidence_levels, colors_by_name, figsize=(7, 6), dpi=120, figposition=[0.3, 0.3, 0.5, 0.5], legend='right', hatches_by_name=None)
def confidence_interval_uncertainty(cl, cl_bounds, posteriors)
Returns a tuple (relative_change, fractional_uncertainty, percentile_uncertainty) giving the uncertai...
def ang_dist(long1, lat1, long2, lat2)
Find the angular separation of (long1,lat1) and (long2,lat2), which are specified in radians.
def pol2cart(long, lat)
Utility function to convert longitude,latitude on a unit sphere to cartesian co-ordinates.
def kdtree_bin(posterior, coord_names, confidence_levels, initial_boundingbox=None, samples_per_bin=10)
takes samples and applies a KDTree to them to return confidence levels returns confidence_intervals -...
def plot_corner(posterior, levels, parnames=None)
Make a corner plot using the triangle module (See http://github.com/dfm/corner.py)
def sph2cart(r, theta, phi)
Utility function to convert r,theta,phi to cartesian co-ordinates.
def get_end(siminspiral)
def get_inj_by_time(injections, time)
Filter injections to find the injection with end time given by time +/- 0.1s.
def array_dot(vec1, vec2)
Calculate dot products between vectors in rows of numpy arrays.
def rotation_matrix(angle, direction)
Compute general rotation matrices for a given angles and direction vectors.
def greedy_bin_two_param(posterior, greedy2Params, confidence_levels)
Determine the 2-parameter Bayesian Confidence Intervals using a greedy binning algorithm.
def chi_precessing(m1, a1, tilt1, m2, a2, tilt2)
Calculate the magnitude of the effective precessing spin following convention from Phys.
def skymap_confidence_areas(hpmap, cls)
Returns the area (in square degrees) for each confidence level with a greedy binning algorithm for th...
def readCoincXML(xml_file, trignum)
def calculate_redshift(distance, h=0.6790, om=0.3065, ol=0.6935, w0=-1.0)
Calculate the redshift from the luminosity distance measurement using the Cosmology Calculator provid...
def replace_column(table, old, new)
Workaround for missing astropy.table.Table.replace_column method, which was added in Astropy 1....
def mc2ms(mc, eta)
Utility function for converting mchirp,eta to component masses.
def array_polar_ang(vec)
Find polar angles of vectors in rows of a numpy array.
def component_momentum(m, a, theta, phi)
Calculate BH angular momentum vector.
def ROTATEY(angle, vx, vy, vz)
def DistanceMeasure(redshift, nonGR_alpha)
D_alpha = ((1+z)^(1-alpha))/H_0 * D_alpha # from eq.15 of arxiv 1110.2720 D_alpha calculated from int...
def integrand_distance(redshift, nonGR_alpha)
Following functions added for testing Lorentz violations.
def plot_two_param_kde_greedy_levels(posteriors_by_name, plot2DkdeParams, levels, colors_by_name, line_styles=__default_line_styles, figsize=(4, 3), dpi=250, figposition=[0.2, 0.2, 0.48, 0.75], legend='right', hatches_by_name=None, Npixels=50)
Plots a 2D kernel density estimate of the 2-parameter marginal posterior.
def orbital_momentum(fref, m1, m2, inclination)
Calculate orbital angular momentum vector.
def ROTATEZ(angle, vx, vy, vz)
def source_mass(mass, redshift)
Calculate source mass parameter for mass m as: m_source = m / (1.0 + z) For a parameter m.
def greedy_bin_one_param(posterior, greedy1Param, confidence_levels)
Determine the 1-parameter Bayesian Confidence Interval using a greedy binning algorithm.
def spin_angles(fref, mc, eta, incl, a1, theta1, phi1, a2=None, theta2=None, phi2=None)
Calculate physical spin angles.
def bbh_average_fits_precessing(m1, m2, chi1, chi2, tilt1, tilt2, phi12, quantity, fits)
Convenience functions.
Definition: nrutils.py:1311
def read_samples(filename, path=None, tablename=POSTERIOR_SAMPLES)
Read an HDF5 sample chain file.
Definition: hdf5.py:168
def draw_posterior_many(datas, Nlives, verbose=False)
Draw samples from the posteriors represented by the (Nruns, Nsamples, Nparams)-shaped array datas,...
Definition: nest2pos.py:66
def draw_N_posterior_many(datas, Nlives, Npost, verbose=False)
Draw Npost samples from the posteriors represented by the (Nruns, Nsamples, Nparams)-shaped array dat...
Definition: nest2pos.py:115