Coverage for pesummary/tests/executable_test.py: 98.7%
1187 statements
« prev ^ index » next coverage.py v7.4.4, created at 2025-11-05 13:38 +0000
1# License under an MIT style license -- see LICENSE.md
3import os
4import shutil
5import glob
6import subprocess
7from getpass import getuser
8import numpy as np
10from .base import (
11 make_result_file, get_list_of_plots, get_list_of_files, data_dir,
12 testing_dir
13)
14import pytest
15from pesummary.utils.exceptions import InputError
16import importlib
17import tempfile
18from pathlib import Path
# Name of a unique, hidden scratch directory shared by all test classes in
# this module. Only the *name* is kept: the TemporaryDirectory object is
# discarded immediately, so the directory it created on disk is cleaned up
# again when the object is finalised; each test class re-creates and removes
# the directory itself in its setup_method/teardown_method.
tmpdir = Path(tempfile.TemporaryDirectory(prefix=".", dir=".").name).name

__author__ = ["Charlie Hoy <charlie.hoy@ligo.org>"]
class Base(object):
    """Base class providing a helper to run pesummary executables in-process.
    """
    def launch(self, command_line):
        """Run the executable named in ``command_line`` and return its result.

        The first whitespace-separated token of ``command_line`` is the
        executable name; the remaining tokens are forwarded to the matching
        ``pesummary.cli`` module's ``main`` function.
        """
        executable, *cla = command_line.split(" ")
        module = importlib.import_module(f"pesummary.cli.{executable}")
        # Deliberately kept: some tests (e.g. test_summaryversion) index the
        # captured stdout by line number and rely on this debug line.
        print(cla)
        filtered = [token for token in cla if token not in ("", " ")]
        return module.main(args=filtered)
class TestSummaryVersion(Base):
    """Trivial check of the `summaryversion` executable.
    """
    @pytest.mark.executabletest
    def test_summaryversion(self):
        """`summaryversion` must print pesummary.__version__.
        """
        import io
        from contextlib import redirect_stdout
        from pesummary import __version__
        buffer = io.StringIO()
        with redirect_stdout(buffer):
            self.launch("summaryversion")
        # line 0 is the debug print of the argument list from Base.launch,
        # line 1 is the version string itself
        printed = buffer.getvalue().split("\n")
        assert printed[1] == __version__
class TestSummaryGracedb(Base):
    """Trivial end-to-end checks of the `summarygracedb` executable.
    """
    def setup_method(self):
        """Create the scratch directory used by each test.
        """
        os.makedirs(tmpdir, exist_ok=True)

    def teardown_method(self):
        """Delete the scratch directory and everything inside it.
        """
        if os.path.isdir(tmpdir):
            shutil.rmtree(tmpdir)

    @pytest.mark.executabletest
    def test_fake_event(self):
        """`summarygracedb` must raise an HTTPError for a non-existent event.
        """
        from ligo.gracedb import exceptions
        with pytest.raises(exceptions.HTTPError):
            self.launch("summarygracedb --id S111111m")

    @pytest.mark.executabletest
    def test_output(self):
        """The JSON written by `summarygracedb` contains the requested fields.
        """
        import json
        self.launch(
            f"summarygracedb --id S190412m --output {tmpdir}/output.json"
        )
        with open(f"{tmpdir}/output.json", "r") as f:
            data = json.load(f)
        assert data["superevent_id"] == "S190412m"
        assert "em_type" in data.keys()
        # restrict the output to three requested fields and compare against
        # the unrestricted download
        self.launch(
            f"summarygracedb --id S190412m --output {tmpdir}/output2.json "
            "--info superevent_id far created"
        )
        with open(f"{tmpdir}/output2.json", "r") as f:
            data2 = json.load(f)
        assert len(data2) == 3
        assert all(
            key in data2.keys() for key in ["superevent_id", "far", "created"]
        )
        assert data2["superevent_id"] == data["superevent_id"]
        assert data2["far"] == data["far"]
        assert data2["created"] == data["created"]
class TestSummaryDetchar(Base):
    """Test the `summarydetchar` executable with trivial examples
    """
    def setup_method(self):
        """Write random H1/L1 strain data (gwf and hdf5 formats) to a
        scratch directory
        """
        from gwpy.timeseries import TimeSeries
        if not os.path.isdir(tmpdir):
            os.mkdir(tmpdir)
        # 100s of random strain sampled at 10Hz for each detector
        H1_series = TimeSeries(
            np.random.uniform(-1, 1, 1000), t0=101, dt=0.1, name="H1:test"
        )
        H1_series.write(f"{tmpdir}/H1.gwf", format="gwf")
        L1_series = TimeSeries(
            np.random.uniform(-1, 1, 1000), t0=101, dt=0.1, name="L1:test"
        )
        L1_series.write(f"{tmpdir}/L1.hdf", format="hdf5")

    def teardown_method(self):
        """Remove the files and directories created from this class
        """
        if os.path.isdir(tmpdir):
            shutil.rmtree(tmpdir)

    @pytest.mark.executabletest
    def test_spectrogram(self):
        """Check that a spectrogram can be generated from the `summarydetchar`
        executable
        """
        # NOTE: the unused `TimeSeries` import was removed here; only
        # rcParams is needed. Disable LaTeX so no TeX install is required.
        from matplotlib import rcParams
        rcParams["text.usetex"] = False
        command_line = (
            f"summarydetchar --gwdata H1:test:{tmpdir}/H1.gwf L1:test:{tmpdir}/L1.hdf "
            f"--webdir {tmpdir} --plot spectrogram"
        )
        self.launch(command_line)
        assert os.path.isfile(f"{tmpdir}/spectrogram_H1.png")
        assert os.path.isfile(f"{tmpdir}/spectrogram_L1.png")

    @pytest.mark.executabletest
    def test_omegascan(self):
        """Check that an omegascan can be generated from the `summarydetchar`
        executable
        """
        command_line = (
            f"summarydetchar --gwdata H1:test:{tmpdir}/H1.gwf L1:test:{tmpdir}/L1.hdf "
            f"--webdir {tmpdir} --plot omegascan --gps 150 --window 0.1"
        )
        self.launch(command_line)
        assert os.path.isfile(f"{tmpdir}/omegascan_H1.png")
        assert os.path.isfile(f"{tmpdir}/omegascan_L1.png")
class TestSummaryPublication(Base):
    """Trivial end-to-end checks of the `summarypublication` executable.
    """
    def setup_method(self):
        """Create a scratch directory containing a bilby result file.
        """
        if not os.path.isdir(tmpdir):
            os.mkdir(tmpdir)
        make_result_file(bilby=True, gw=True, outdir=tmpdir)
        os.rename(f"{tmpdir}/test.json", f"{tmpdir}/bilby.json")

    def teardown_method(self):
        """Delete the scratch directory and everything inside it.
        """
        if os.path.isdir(tmpdir):
            shutil.rmtree(tmpdir)

    @pytest.mark.executabletest
    def test_2d_contour(self):
        """Check that a 2d contour plot is produced.
        """
        cli = (
            f"summarypublication --webdir {tmpdir} --samples {tmpdir}/bilby.json "
            "--labels test --parameters mass_1 mass_2 --levels 0.9 0.5 "
            "--plot 2d_contour --palette colorblind"
        )
        self.launch(cli)
        expected = os.path.join(tmpdir, "2d_contour_plot_mass_1_and_mass_2.png")
        assert os.path.isfile(expected)

    @pytest.mark.executabletest
    def test_violin(self):
        """Check that a violin plot is produced.
        """
        cli = (
            f"summarypublication --webdir {tmpdir} --samples {tmpdir}/bilby.json "
            "--labels test --parameters mass_1 --plot violin "
            "--palette colorblind"
        )
        self.launch(cli)
        expected = os.path.join(tmpdir, "violin_plot_mass_1.png")
        assert os.path.isfile(expected)

    @pytest.mark.executabletest
    def test_spin_disk(self):
        """Check that a spin disk plot is produced.
        """
        cli = (
            f"summarypublication --webdir {tmpdir} --samples {tmpdir}/bilby.json "
            "--labels test --parameters mass_1 --plot spin_disk "
            "--palette colorblind"
        )
        self.launch(cli)
        expected = os.path.join(tmpdir, "spin_disk_plot_test.png")
        assert os.path.isfile(expected)
class TestSummaryPipe(Base):
    """Trivial end-to-end checks of the `summarypipe` executable.
    """
    def setup_method(self):
        """Build fake lalinference and bilby run directories with samples.
        """
        self.dirs = [
            tmpdir,
            f"{tmpdir}/lalinference",
            f"{tmpdir}/bilby",
            f"{tmpdir}/lalinference/posterior_samples",
            f"{tmpdir}/lalinference/ROQdata",
            f"{tmpdir}/lalinference/engine",
            f"{tmpdir}/lalinference/caches",
            f"{tmpdir}/lalinference/log",
            f"{tmpdir}/bilby/data",
            f"{tmpdir}/bilby/result",
            f"{tmpdir}/bilby/submit",
            f"{tmpdir}/bilby/log_data_analysis",
        ]
        # parents are listed before children, so a plain mkdir suffices
        for directory in self.dirs:
            if not os.path.isdir(directory):
                os.mkdir(directory)
        make_result_file(
            gw=False, lalinference=True,
            outdir=f"{tmpdir}/lalinference/posterior_samples/"
        )
        os.rename(
            f"{tmpdir}/lalinference/posterior_samples/test.hdf5",
            f"{tmpdir}/lalinference/posterior_samples/posterior_HL_result.hdf5"
        )
        make_result_file(
            gw=False, bilby=True, outdir=f"{tmpdir}/bilby/result/"
        )
        os.rename(
            f"{tmpdir}/bilby/result/test.json",
            f"{tmpdir}/bilby/result/label_result.json"
        )

    def add_config_file(self):
        """Copy reference config files into both fake run directories.
        """
        shutil.copyfile(
            os.path.join(data_dir, "config_lalinference.ini"),
            f"{tmpdir}/lalinference/config.ini"
        )
        shutil.copyfile(
            os.path.join(data_dir, "config_bilby.ini"),
            f"{tmpdir}/bilby/config.ini"
        )

    def teardown_method(self):
        """Delete every directory created by setup_method.
        """
        for directory in self.dirs:
            if os.path.isdir(directory):
                shutil.rmtree(directory)

    @pytest.mark.executabletest
    def test_no_config(self):
        """summarypipe must fail when the run directory has no config file.
        """
        for _type in ["lalinference", "bilby"]:
            with pytest.raises(FileNotFoundError):
                self.launch(f"summarypipe --rundir {tmpdir}/{_type}")

    @pytest.mark.executabletest
    def test_no_samples(self):
        """summarypipe must fail when there are no posterior samples in the
        run directory.
        """
        self.add_config_file()
        for _type in ["lalinference", "bilby"]:
            if _type == "lalinference":
                os.remove(
                    f"{tmpdir}/{_type}/posterior_samples/posterior_HL_result.hdf5"
                )
            else:
                os.remove(f"{tmpdir}/{_type}/result/label_result.json")
            with pytest.raises(FileNotFoundError):
                self.launch(f"summarypipe --rundir {tmpdir}/{_type}")

    @pytest.mark.executabletest
    def test_basic(self):
        """summarypipe builds a sensible summarypages command for both codes.
        """
        self.add_config_file()
        for _type in ["lalinference", "bilby"]:
            output = self.launch(
                f"summarypipe --rundir {tmpdir}/{_type} --return_string"
            )
            assert "--config" in output
            print(output)
            print(f"{tmpdir}/{_type}/config.ini")
            assert f"{tmpdir}/{_type}/config.ini" in output
            assert "--samples" in output
            if _type == "lalinference":
                expected = (
                    f"{tmpdir}/{_type}/posterior_samples/"
                    "posterior_HL_result.hdf5"
                )
            else:
                expected = f"{tmpdir}/{_type}/result/label_result.json"
            assert expected in output
            assert "--webdir" in output
            assert "--approximant" in output
            assert "--labels" in output

    @pytest.mark.executabletest
    def test_override(self):
        """An option supplied on the command line overrides the value
        inferred from the run directory.
        """
        self.add_config_file()
        command_line = (
            f"summarypipe --rundir {tmpdir}/lalinference --return_string"
        )
        output = self.launch(command_line)
        output2 = self.launch(command_line + " --labels hello")
        assert output != output2
        tokens, tokens2 = output.split(" "), output2.split(" ")
        label = tokens[tokens.index("--labels") + 1]
        label2 = tokens2[tokens2.index("--labels") + 1]
        assert label != label2
        assert label2 == "hello"

    @pytest.mark.executabletest
    def test_add_to_summarypages_command(self):
        """Options not already present in the inferred summarypages command
        are appended to it.
        """
        self.add_config_file()
        command_line = (
            f"summarypipe --rundir {tmpdir}/lalinference --return_string"
        )
        output = self.launch(command_line)
        extras = " --multi_process 10 --kde_plot --cosmology Planck15_lal"
        output2 = self.launch(command_line + extras)
        assert output != output2
        for option in (
            "--multi_process 10", "--cosmology Planck15_lal", "--kde_plot"
        ):
            assert option in output2
            assert option not in output
class TestSummaryPages(Base):
    """Test the `summarypages` executable with trivial examples
    """
    def setup_method(self):
        """Setup the TestSummaryPages class: create the scratch directories
        and two equivalent result files (json and hdf5)
        """
        self.dirs = [tmpdir, "{}1".format(tmpdir), "{}2".format(tmpdir)]
        for dd in self.dirs:
            if not os.path.isdir(dd):
                os.mkdir(dd)
        make_result_file(outdir=tmpdir, gw=False, extension="json")
        os.rename("{}/test.json".format(tmpdir), "{}/example.json".format(tmpdir))
        make_result_file(outdir=tmpdir, gw=False, extension="hdf5")
        os.rename("{}/test.h5".format(tmpdir), "{}/example2.h5".format(tmpdir))

    def teardown_method(self):
        """Remove the files and directories created from this class
        """
        for dd in self.dirs:
            if os.path.isdir(dd):
                shutil.rmtree(dd)

    def check_output(
        self, number=1, mcmc=False, existing_plot=False, expert=False,
        gw=False
    ):
        """Check the output from the summarypages executable

        Parameters
        ----------
        number: int, optional
            number of analyses passed to summarypages
        mcmc: bool, optional
            if True, expect the mcmc-specific set of plots
        existing_plot: bool, optional
            if True, expect the page for user-supplied plots
        expert: bool, optional
            if True, expect the expert diagnostic plots
        gw: bool, optional
            if True, expect the GW-specific plots/pages
        """
        assert os.path.isfile("{}/home.html".format(tmpdir))
        plots = get_list_of_plots(
            gw=gw, number=number, mcmc=mcmc, existing_plot=existing_plot,
            expert=expert, outdir=tmpdir
        )
        # debug aid: show the expected/actual plot pairing on failure
        for i, j in zip(
            sorted(plots), sorted(glob.glob("{}/plots/*.png".format(tmpdir)))
        ):
            print(i, j)
        # NOTE(review): zip stops at the shorter sequence, so extra/missing
        # entries at the end of either list are not caught here — confirm
        # whether a length check is also wanted
        assert all(
            i == j for i, j in zip(
                sorted(plots), sorted(glob.glob("{}/plots/*.png".format(tmpdir)))
            )
        )
        files = get_list_of_files(
            gw=gw, number=number, existing_plot=existing_plot, outdir=tmpdir
        )
        assert all(
            i == j for i, j in zip(
                sorted(files), sorted(glob.glob("{}/html/*.html".format(tmpdir)))
            )
        )

    @pytest.mark.executabletest
    def test_descriptions(self):
        """Check that summarypages stores the correct descriptions when the
        `--descriptions` flag is provided
        """
        import json
        from pesummary.io import read
        # descriptions given directly on the command line; core1 gets none
        command_line = (
            "summarypages --webdir {0} --samples {0}/example.json "
            "{0}/example.json --labels core0 core1 --nsamples 100 "
            "--disable_corner --descriptions core0:Description".format(tmpdir)
        )
        self.launch(command_line)
        opened = read("{}/samples/posterior_samples.h5".format(tmpdir))
        assert opened.description["core0"] == "Description"
        assert opened.description["core1"] == "No description found"
        # descriptions given via a JSON file instead
        with open("{}/descriptions.json".format(tmpdir), "w") as f:
            json.dump({"core0": "Testing description", "core1": "Test"}, f)
        command_line = (
            "summarypages --webdir {0} --samples {0}/example.json "
            "{0}/example.json --labels core0 core1 --nsamples 100 "
            "--disable_corner --descriptions {0}/descriptions.json".format(tmpdir)
        )
        self.launch(command_line)
        opened = read("{}/samples/posterior_samples.h5".format(tmpdir))
        assert opened.description["core0"] == "Testing description"
        assert opened.description["core1"] == "Test"

    @pytest.mark.executabletest
    def test_reweight(self):
        """Check that summarypages reweights the posterior samples if the
        `--reweight_samples` flag is provided
        """
        from pesummary.io import read
        make_result_file(gw=True, extension="json", outdir=tmpdir)
        command_line = (
            "summarypages --webdir {0} --samples {0}/test.json --gw "
            "--labels gw0 --nsamples 100 --disable_corner "
            "--reweight_samples uniform_in_comoving_volume "
            "--pastro_category_file {1}/rates.yml "
            "--catch_terrestrial_probability_error".format(tmpdir, testing_dir)
        )
        self.launch(command_line)
        self.check_output(number=1, expert=False, gw=True)
        original = read("{0}/test.json".format(tmpdir)).samples_dict
        _reweighted = read("{0}/samples/posterior_samples.h5".format(tmpdir))
        reweighted = _reweighted.samples_dict
        # reweighting can only discard samples, never add them
        assert original.number_of_samples >= reweighted["gw0"].number_of_samples
        inds = np.array([
            original.parameters.index(param) for param in
            reweighted["gw0"].parameters if param in original.parameters
        ])
        # every retained sample must be one of the original samples
        assert all(
            reweighted_sample[inds] in original.samples.T for reweighted_sample
            in reweighted["gw0"].samples.T
        )
        _kwargs = _reweighted.extra_kwargs[0]
        assert _kwargs["sampler"]["nsamples_before_reweighting"] == 100
        assert _kwargs["sampler"]["nsamples"] == reweighted["gw0"].number_of_samples
        assert _kwargs["meta_data"]["reweighting"] == "uniform_in_comoving_volume"

    @pytest.mark.executabletest
    def test_checkpoint(self):
        """Check that when restarting from checkpoint, the outputs are
        consistent
        """
        import time
        command_line = (
            "summarypages --webdir {0} --samples {0}/example.json "
            "--labels core0 --nsamples 100 "
            "--restart_from_checkpoint".format(tmpdir)
        )
        t0 = time.time()
        self.launch(command_line)
        t1 = time.time()
        assert os.path.isfile("{}/checkpoint/pesummary_resume.pickle".format(tmpdir))
        self.check_output(number=1, expert=False)
        t2 = time.time()
        self.launch(command_line)
        t3 = time.time()
        # the resumed run should be faster than the run from scratch
        assert t3 - t2 < t1 - t0
        self.check_output(number=1, expert=False)
        # get timestamp of plot: plots must not have been regenerated by the
        # resumed run
        made_time = os.path.getmtime(glob.glob("{}/plots/*.png".format(tmpdir))[0])
        assert made_time < t2

    @pytest.mark.executabletest
    def test_expert(self):
        """Check that summarypages produces the expected expert diagnostic
        plots
        """
        command_line = (
            "summarypages --webdir {0} --samples {0}/example.json "
            "--labels core0 --nsamples 100".format(tmpdir)
        )
        self.launch(command_line)
        self.check_output(number=1, expert=False)
        command_line = (
            "summarypages --webdir {0} --samples {0}/example.json "
            "--labels core0 --nsamples 100 --enable_expert".format(tmpdir)
        )
        self.launch(command_line)
        self.check_output(number=1, expert=True)

    @pytest.mark.executabletest
    def test_prior_input(self):
        """Check that `summarypages` works when a prior file is passed from
        the command line
        """
        import importlib
        from bilby import gw
        path = gw.__path__[0]
        bilby_prior_file = os.path.join(
            path, "prior_files", "GW150914.prior"
        )
        for package in ["core", "gw"]:
            # NOTE: this rebinds `gw`, shadowing the `bilby.gw` module
            # imported above; `path` was captured before the loop, so this
            # is safe
            gw = True if package == "gw" else False
            module = importlib.import_module(
                "pesummary.{}.file.read".format(package)
            )
            make_result_file(outdir=tmpdir, gw=gw, extension="json")
            os.rename("{}/test.json".format(tmpdir), "{}/prior.json".format(tmpdir))
            for _file in ["{}/prior.json".format(tmpdir), bilby_prior_file]:
                command_line = (
                    "summarypages --webdir {} --samples {}/example.json "
                    "--labels test --prior_file {} --nsamples_for_prior "
                    "10 ".format(tmpdir, tmpdir, _file)
                )
                command_line += " --gw" if gw else ""
                self.launch(command_line)
                f = module.read("{}/samples/posterior_samples.h5".format(tmpdir))
                if _file != bilby_prior_file:
                    stored = f.priors["samples"]["test"]
                    f = module.read(_file)
                    original = f.samples_dict
                    for param in original.keys():
                        np.testing.assert_almost_equal(
                            original[param], stored[param]
                        )
                        # Non-bilby prior file will have same number or prior
                        # samples as posterior samples
                        assert len(stored[param]) == 1000
                else:
                    from bilby.core.prior import PriorDict
                    analytic = f.priors["analytic"]["test"]
                    bilby_prior = PriorDict(filename=bilby_prior_file)
                    for param, value in bilby_prior.items():
                        assert analytic[param] == str(value)
                    params = list(f.priors["samples"]["test"].keys())
                    # A bilby prior file will have 10 prior samples
                    assert len(f.priors["samples"]["test"][params[0]]) == 10

    @pytest.mark.executabletest
    def test_calibration_and_psd(self):
        """Test that the calibration and psd files are passed appropiately
        """
        from pesummary.gw.file.read import read
        from .base import make_psd, make_calibration
        make_psd(outdir=tmpdir)
        make_calibration(outdir=tmpdir)
        command_line = (
            "summarypages --webdir {0} --samples {0}/example.json "
            "--psd H1:{0}/psd.dat --calibration L1:{0}/calibration.dat "
            "--labels test --posterior_samples_filename example.h5 "
            "--calibration_definition template".format(tmpdir)
        )
        self.launch(command_line)
        f = read("{}/samples/example.h5".format(tmpdir))
        psd = np.genfromtxt("{}/psd.dat".format(tmpdir))
        calibration = np.genfromtxt("{}/calibration.dat".format(tmpdir))
        # the stored data must match what was on disk
        np.testing.assert_almost_equal(f.psd["test"]["H1"], psd)
        np.testing.assert_almost_equal(
            f.priors["calibration"]["test"]["L1"], calibration
        )

    @pytest.mark.executabletest
    def test_strain_data(self):
        """Test that the gravitational wave data is passed appropiately
        """
        from pesummary.io import read
        from gwpy.timeseries import TimeSeries
        H1_series = TimeSeries(
            np.random.uniform(-1, 1, 1000), t0=101, dt=0.1, name="H1:test"
        )
        H1_series.write("{}/H1.gwf".format(tmpdir), format="gwf")
        L1_series = TimeSeries(
            np.random.uniform(-1, 1, 1000), t0=201, dt=0.2, name="L1:test"
        )
        L1_series.write("{}/L1.hdf".format(tmpdir), format="hdf5")
        command_line = (
            "summarypages --webdir {0} --samples {0}/example.json "
            "--gwdata H1:test:{0}/H1.gwf L1:test:{0}/L1.hdf "
            "--labels test --disable_corner --disable_interactive".format(tmpdir)
        )
        self.launch(command_line)
        f = read("{}/samples/posterior_samples.h5".format(tmpdir))
        gwdata = f.gwdata
        assert all(IFO in gwdata.detectors for IFO in ["H1", "L1"])
        # the stored strain must round-trip value, start time, sample spacing
        # and unit for each detector
        strain = {"H1": H1_series, "L1": L1_series}
        for IFO in gwdata.detectors:
            np.testing.assert_almost_equal(gwdata[IFO].value, strain[IFO].value)
            assert gwdata[IFO].t0 == strain[IFO].t0
            assert gwdata[IFO].dt == strain[IFO].dt
            assert gwdata[IFO].unit == strain[IFO].unit

    @pytest.mark.executabletest
    def test_gracedb(self):
        """Test that when the gracedb ID is passed from the command line it is
        correctly stored in the meta data
        """
        from pesummary.gw.file.read import read
        command_line = (
            "summarypages --webdir {0} --samples {0}/example.json "
            "--gracedb G17864 --gw --labels test".format(tmpdir)
        )
        self.launch(command_line)
        f = read("{}/samples/posterior_samples.h5".format(tmpdir))
        assert "gracedb" in f.extra_kwargs[0]["meta_data"]
        assert "G17864" == f.extra_kwargs[0]["meta_data"]["gracedb"]["id"]

    @pytest.mark.executabletest
    def test_single(self):
        """Test on a single input
        """
        command_line = (
            "summarypages --webdir {0} --samples "
            "{0}/example.json --label core0 ".format(tmpdir)
        )
        self.launch(command_line)
        self.check_output(number=1)

    @pytest.mark.executabletest
    def test_summarycombine_output(self):
        """Test on a summarycombine output
        """
        from .base import make_psd, make_calibration
        make_psd(outdir=tmpdir)
        make_calibration(outdir=tmpdir)
        # two independent metafiles, one with calibration, one with a psd
        command_line = (
            "summarycombine --webdir {0}1 --samples "
            "{0}/example.json --label gw0 "
            "--calibration L1:{0}/calibration.dat --gw".format(tmpdir)
        )
        self.launch(command_line)
        command_line = (
            "summarycombine --webdir {0}2 --samples "
            "{0}/example.json --label gw1 "
            "--psd H1:{0}/psd.dat --gw".format(tmpdir)
        )
        self.launch(command_line)
        # combine the two metafiles and run summarypages on the result
        command_line = (
            "summarycombine --webdir {0} --gw --samples "
            "{0}1/samples/posterior_samples.h5 "
            "{0}2/samples/posterior_samples.h5 ".format(tmpdir)
        )
        self.launch(command_line)
        command_line = (
            "summarypages --webdir {0} --gw --samples "
            "{0}/samples/posterior_samples.h5 ".format(tmpdir)
        )
        self.launch(command_line)

    @pytest.mark.executabletest
    def test_mcmc(self):
        """Test the `--mcmc_samples` command line argument
        """
        command_line = (
            "summarypages --webdir {0} --samples "
            "{0}/example.json {0}/example2.h5 "
            "--label core0 --mcmc_samples".format(tmpdir)
        )
        self.launch(command_line)
        self.check_output(number=1, mcmc=True)

    @pytest.mark.executabletest
    def test_kde_plot(self):
        """Test that the kde plots work on a single input and on MCMC inputs
        """
        command_line = (
            "summarypages --webdir {0} --samples "
            "{0}/example.json --label core0 --kde_plot "
            "".format(tmpdir)
        )
        self.launch(command_line)
        self.check_output(number=1)
        command_line = (
            "summarypages --webdir {0} --samples "
            "{0}/example.json {0}/example2.h5 "
            "--label core0 --mcmc_samples --kde_plot".format(tmpdir)
        )
        self.launch(command_line)
        self.check_output(number=1, mcmc=True)

    @pytest.mark.executabletest
    def test_mcmc_more_than_label(self):
        """Test that the code fails with the `--mcmc_samples` command line
        argument when multiple labels are passed.
        """
        command_line = (
            "summarypages --webdir {0} --samples "
            "{0}/example.json {0}/example2.h5 "
            "{0}/example.json {0}/example2.h5 "
            "--label core0 core1 --mcmc_samples".format(tmpdir)
        )
        with pytest.raises(InputError):
            self.launch(command_line)

    @pytest.mark.executabletest
    def test_file_format_wrong_number(self):
        """Test that the code fails with the `--file_format` command line
        argument when the number of file formats does not match the number of
        samples
        """
        command_line = (
            "summarypages --webdir {0} --samples "
            "{0}/example.json {0}/example2.h5 "
            "--file_format hdf5 json dat".format(tmpdir)
        )
        with pytest.raises(InputError):
            self.launch(command_line)

    @pytest.mark.executabletest
    def test_add_existing_plot(self):
        """Test that an Additional page is made if existing plots are provided
        to the summarypages executable
        """
        # an empty png file is sufficient: only its presence is checked
        with open("{}/test.png".format(tmpdir), "w") as f:
            f.writelines("")
        command_line = (
            "summarypages --webdir {0} --samples "
            "{0}/example.json --label core0 --add_existing_plot "
            "core0:{0}/test.png ".format(tmpdir)
        )
        self.launch(command_line)
        self.check_output(number=1, existing_plot=True)
        command_line = (
            "summarypages --webdir {0} --samples "
            "{0}/example.json {0}/example.json --label core0 core1 "
            "--add_existing_plot core0:{0}/test.png core1:{0}/test.png "
            "".format(tmpdir)
        )
        self.launch(command_line)
        self.check_output(number=2, existing_plot=True)
class TestSummaryPagesLW(Base):
    """Test the `summarypageslw` executable
    """
    def setup_method(self):
        """Setup the SummaryPagesLW class: create a scratch directory
        containing a bilby result file
        """
        if not os.path.isdir(tmpdir):
            os.mkdir(tmpdir)
        make_result_file(bilby=True, gw=True, outdir=tmpdir)
        os.rename("{}/test.json".format(tmpdir), "{}/bilby.json".format(tmpdir))

    def teardown_method(self):
        """Remove the files and directories created from this class
        """
        if os.path.isdir(tmpdir):
            shutil.rmtree(tmpdir)

    def check_output(
        self, gw=False, number=1, outdir=tmpdir, parameters=None, sections=None,
        extra_gw_plots=True
    ):
        """Check the output from the summarypageslw executable

        Parameters
        ----------
        gw: bool, optional
            if True, expect the GW-specific plots/pages
        number: int, optional
            number of analyses passed to summarypageslw
        outdir: str, optional
            directory containing the generated webpages
        parameters: list, optional
            parameters expected to have been plotted. Default empty list
        sections: list, optional
            navbar sections expected on the html pages. Default empty list
        extra_gw_plots: bool, optional
            if True, expect the additional GW plots/pages
        """
        # None defaults avoid the shared mutable-default-argument pitfall
        if parameters is None:
            parameters = []
        if sections is None:
            sections = []
        assert os.path.isfile("./{}/home.html".format(outdir))
        plots = get_list_of_plots(
            gw=gw, number=number, mcmc=False, existing_plot=False,
            expert=False, parameters=parameters, outdir=outdir,
            extra_gw_plots=extra_gw_plots,
            remove_gw_plots=["classification"]
        )
        # glob once and compare both ways: the generated plots and the
        # expected plots must be identical as sets
        png = glob.glob("{}/plots/*.png".format(outdir))
        assert all(i in plots for i in png)
        assert all(i in png for i in plots)
        files = get_list_of_files(
            gw=gw, number=number, existing_plot=False, parameters=parameters,
            sections=sections, outdir=outdir, extra_gw_pages=extra_gw_plots,
            remove_gw_pages=["classification"]
        )
        html = glob.glob("{}/html/*.html".format(outdir))
        assert all(i in files for i in html)
        # debug aid: show which expected pages are missing on failure
        for i in files:
            print(i, i in html)
        assert all(i in html for i in files)

    @pytest.mark.executabletest
    def test_single(self):
        """Test that the `summarypageslw` executable works as expected
        when a single result file is provided
        """
        command_line = (
            "summarypageslw --webdir {0} --samples {0}/bilby.json "
            "--labels core0 --parameters mass_1 mass_2 "
            "".format(tmpdir)
        )
        self.launch(command_line)
        self.check_output(parameters=["mass_1", "mass_2"], sections=["M-P"])
        command_line = (
            "summarypageslw --webdir {0}/gw --samples {0}/bilby.json "
            "--labels gw0 --parameters mass_1 mass_2 "
            "--gw".format(tmpdir)
        )
        self.launch(command_line)
        self.check_output(
            gw=True, parameters=["mass_1", "mass_2"], sections=["masses"],
            outdir="{}/gw".format(tmpdir), extra_gw_plots=False
        )
        # an unknown parameter is simply dropped from the output
        command_line = command_line.replace(
            "{}/gw".format(tmpdir), "{}/gw2".format(tmpdir)
        )
        command_line = command_line.replace("mass_1", "made_up_label")
        self.launch(command_line)
        self.check_output(
            gw=True, parameters=["mass_2"], sections=["masses"],
            outdir="{}/gw2".format(tmpdir), extra_gw_plots=False
        )
        # all parameters unknown -> the executable must fail
        with pytest.raises(Exception):
            command_line = command_line.replace("mass_2", "made_up_label2")
            self.launch(command_line)

    @pytest.mark.executabletest
    def test_double(self):
        """Test that the `summarypageslw` executable works as expected
        when multiple result files are provided
        """
        command_line = (
            "summarypageslw --webdir {0} --samples {0}/bilby.json "
            "{0}/bilby.json --labels core0 core1 --parameters mass_1 mass_2 "
            "".format(tmpdir)
        )
        self.launch(command_line)
        self.check_output(
            number=2, parameters=["mass_1", "mass_2"], sections=["M-P"]
        )

    @pytest.mark.executabletest
    def test_pesummary(self):
        """Test that the `summarypageslw` executable works as expected
        for a pesummary metafile
        """
        command_line = (
            "summarycombine --webdir {0} --samples {0}/bilby.json "
            "{0}/bilby.json --no_conversion --gw --labels core0 core1 "
            "--nsamples 100".format(tmpdir)
        )
        self.launch(command_line)
        command_line = (
            "summarypageslw --webdir {0}/lw --samples "
            "{0}/samples/posterior_samples.h5 --parameters mass_1 mass_2 "
            "".format(tmpdir)
        )
        self.launch(command_line)
        self.check_output(
            number=2, parameters=["mass_1", "mass_2"], sections=["M-P"],
            outdir="{}/lw".format(tmpdir)
        )
        # an unknown parameter is simply dropped from the output
        command_line = command_line.replace(
            "{}/lw".format(tmpdir), "{}/lw2".format(tmpdir)
        )
        command_line = command_line.replace("mass_1", "made_up_label")
        self.launch(command_line)
        self.check_output(
            number=2, parameters=["mass_2"], sections=["M-P"],
            outdir="{}/lw2".format(tmpdir)
        )
        # a metafile mixing gw and non-gw analyses only produces pages for
        # the analyses containing the requested parameters
        make_result_file(bilby=True, gw=False, outdir=tmpdir)
        os.rename("{}/test.json".format(tmpdir), "{}/bilby2.json".format(tmpdir))
        command_line = (
            "summarycombine --webdir {0} --samples {0}/bilby.json "
            "{0}/bilby2.json --no_conversion --gw --labels core0 core1 "
            "--nsamples 100".format(tmpdir)
        )
        self.launch(command_line)
        command_line = (
            "summarypageslw --webdir {0}/lw3 --samples "
            "{0}/samples/posterior_samples.h5 --parameters mass_1 mass_2 "
            "".format(tmpdir)
        )
        self.launch(command_line)
        self.check_output(
            number=1, parameters=["mass_1", "mass_2"], sections=["M-P"],
            outdir="{}/lw3".format(tmpdir)
        )
class TestSummaryClassification(Base):
    """Test the `summaryclassification` executable
    """
    def setup_method(self):
        """Setup the SummaryClassification class: create a scratch directory
        containing a pesummary metafile and a bilby result file
        """
        if not os.path.isdir(tmpdir):
            os.mkdir(tmpdir)
        make_result_file(outdir=tmpdir, pesummary=True, gw=True, pesummary_label="test")
        os.rename("{}/test.json".format(tmpdir), "{}/pesummary.json".format(tmpdir))
        make_result_file(outdir=tmpdir, bilby=True, gw=True)
        os.rename("{}/test.json".format(tmpdir), "{}/bilby.json".format(tmpdir))

    def teardown_method(self):
        """Remove the files and directories created from this class
        """
        if os.path.isdir(tmpdir):
            shutil.rmtree(tmpdir)

    def check_output(self):
        """Check the output from the `summaryclassification` executable
        """
        # NOTE: the redundant local `import glob` was removed; glob is
        # already imported at module level
        import json
        files = glob.glob("{}/*".format(tmpdir))
        assert "{}/test_pe_classification.json".format(tmpdir) in files
        assert "{}/test_pastro_bar.png".format(tmpdir) in files
        with open("{}/test_pe_classification.json".format(tmpdir), "r") as f:
            data = json.load(f)
        # every source-classification probability must be present
        assert all(
            i in data.keys() for i in [
                "Terrestrial", "BNS", "NSBH", "BBH", "HasMassGap", "HasNS",
                "HasRemnant"
            ]
        )

    @pytest.mark.executabletest
    def test_result_file(self):
        """Test the `summaryclassification` executable for a random result file
        """
        command_line = (
            "summaryclassification --webdir {0} --samples "
            "{0}/bilby.json --prior default --label test "
            "--pastro_category_file {1}/rates.yml "
            "--catch_terrestrial_probability_error".format(tmpdir, testing_dir)
        )
        self.launch(command_line)
        self.check_output()

    @pytest.mark.executabletest
    def test_pesummary_file(self):
        """Test the `summaryclassification` executable for a pesummary metafile
        """
        command_line = (
            "summaryclassification --webdir {0} --samples "
            "{0}/pesummary.json --prior default "
            "--pastro_category_file {1}/rates.yml "
            "--catch_terrestrial_probability_error".format(tmpdir, testing_dir)
        )
        self.launch(command_line)
        self.check_output()
class TestSummaryTGR(Base):
    """Test the `summarytgr` executable
    """
    def setup_method(self):
        """Setup the SummaryTGR class by creating the result files used in
        each test
        """
        if not os.path.isdir(tmpdir):
            os.mkdir(tmpdir)
        make_result_file(
            outdir=tmpdir, pesummary=True, gw=True, pesummary_label="test"
        )
        os.rename(f"{tmpdir}/test.json", f"{tmpdir}/pesummary.json")
        make_result_file(outdir=tmpdir, bilby=True, gw=True)
        os.rename(f"{tmpdir}/test.json", f"{tmpdir}/bilby.json")

    def teardown_method(self):
        """Remove the files and directories created from this class
        """
        if os.path.isdir(tmpdir):
            shutil.rmtree(tmpdir)

    def check_output(self, diagnostic=True):
        """Check the output from the `summarytgr` executable

        Parameters
        ----------
        diagnostic: bool, optional
            if True, also check that the diagnostic plots were produced.
            Default True
        """
        import glob

        image_files = glob.glob(f"{tmpdir}/plots/*")
        image_base_string = tmpdir + "/plots/primary_imrct_{}.png"
        file_strings = ["deviations_triangle_plot"]
        if diagnostic:
            file_strings += [
                "mass_1_mass_2", "a_1_a_2",
                "final_mass_non_evolved_final_spin_non_evolved"
            ]
        for file_string in file_strings:
            assert image_base_string.format(file_string) in image_files

    @pytest.mark.executabletest
    def test_result_file(self):
        """Test the `summarytgr` executable for a random result file
        """
        command_line = (
            f"summarytgr --webdir {tmpdir} "
            f"--samples {tmpdir}/bilby.json {tmpdir}/bilby.json "
            "--test imrct "
            "--labels inspiral postinspiral "
            "--imrct_kwargs N_bins:11 "
            "--make_diagnostic_plots "
            "--disable_pe_page_generation"
        )
        self.launch(command_line)
        self.check_output()

    @pytest.mark.executabletest
    def test_pesummary_file(self):
        """Test the `summarytgr` executable for a pesummary metafile
        """
        command_line = (
            f"summarytgr --webdir {tmpdir} --samples "
            f"{tmpdir}/pesummary.json {tmpdir}/pesummary.json --labels "
            "test:inspiral test:postinspiral --test imrct --imrct_kwargs "
            "N_bins:11 --disable_pe_page_generation"
        )
        self.launch(command_line)
        self.check_output(diagnostic=False)

    @pytest.mark.executabletest
    def test_pdfs_and_gr_quantile(self):
        """Test that the GR quantile and pdf matches the LAL implementation
        The LAL files were produced by the executable imrtgr_imr_consistency_test
        with N_bins=201 dMfbyMf_lim=3 dchifbychif_lim=3 and bbh_average_fits_precessing
        """
        from pesummary.io import read

        # write the fixed-seed result files directly into tmpdir; previously
        # they were written to the CWD and then moved, which leaked test.dat
        # into the working directory whenever the test failed early
        make_result_file(
            outdir=tmpdir, extension="dat", gw=True, random_seed=123456789
        )
        os.rename(f"{tmpdir}/test.dat", f"{tmpdir}/inspiral.dat")
        make_result_file(
            outdir=tmpdir, extension="dat", gw=True, random_seed=987654321
        )
        os.rename(f"{tmpdir}/test.dat", f"{tmpdir}/postinspiral.dat")
        command_line = (
            f"summarytgr --webdir {tmpdir} "
            f"--samples {tmpdir}/inspiral.dat {tmpdir}/postinspiral.dat "
            "--test imrct "
            "--labels inspiral postinspiral "
            "--imrct_kwargs N_bins:201 final_mass_deviation_lim:3 final_spin_deviation_lim:3 "
            "--disable_pe_page_generation"
        )
        self.launch(command_line)
        f = read(f"{tmpdir}/samples/tgr_samples.h5")
        pesummary_quantile = f.extra_kwargs["primary"]["GR Quantile (%)"]
        probdict = f.imrct_deviation["final_mass_final_spin_deviations"]
        lal_pdf = np.loadtxt(os.path.join(data_dir, "lal_pdf_for_summarytgr.dat.gz"))
        # convert the stored probabilities to a density for comparison with LAL
        pesummary_pdf = probdict.probs / probdict.dx / probdict.dy
        np.testing.assert_almost_equal(pesummary_quantile, 3.276372814744687306)
        np.testing.assert_almost_equal(pesummary_pdf, lal_pdf)
class TestSummaryClean(Base):
    """Test the `summaryclean` executable
    """
    def setup_method(self):
        """Setup the SummaryClean class
        """
        if not os.path.isdir(tmpdir):
            os.mkdir(tmpdir)

    def teardown_method(self):
        """Remove the files and directories created from this class
        """
        if os.path.isdir(tmpdir):
            shutil.rmtree(tmpdir)

    @pytest.mark.executabletest
    def test_clean(self):
        """Test the `summaryclean` executable
        """
        import h5py

        parameters = ["mass_ratio"]
        # two physical samples and one unphysical sample (-1.5) which
        # `summaryclean` is expected to remove
        data = [[0.5], [0.5], [-1.5]]
        h5py_data = np.array(
            [tuple(i) for i in data], dtype=[tuple([i, 'float64']) for i in
            parameters]
        )
        # use a context manager so the file handle is always closed, even if
        # the group/dataset creation fails
        with h5py.File("{}/test.hdf5".format(tmpdir), "w") as f:
            lalinference = f.create_group("lalinference")
            nest = lalinference.create_group("lalinference_nest")
            nest.create_dataset("posterior_samples", data=h5py_data)
        command_line = (
            "summaryclean --webdir {0} --samples {0}/test.hdf5 "
            "--file_format dat --labels test".format(tmpdir)
        )
        self.launch(command_line)
        self.check_output()

    def check_output(self):
        """Check the output from the `summaryclean` executable
        """
        from pesummary.gw.file.read import read

        f = read("{}/pesummary_test.dat".format(tmpdir))
        print(f.samples_dict["mass_ratio"])
        # only the two physical samples should survive the cleaning
        assert len(f.samples_dict["mass_ratio"]) == 2
        assert all(i == 0.5 for i in f.samples_dict["mass_ratio"])
class _SummaryCombine_Metafiles(Base):
    """Test the `summarycombine_metafile` executable
    """
    @pytest.mark.executabletest
    def test_combine(self, gw=False):
        """Test the executable for 2 metafiles

        Parameters
        ----------
        gw: bool, optional
            if True, pass the `--gw` flag to the executable. Default False
        """
        make_result_file(outdir=tmpdir, pesummary=True, pesummary_label="label2")
        os.rename("{}/test.json".format(tmpdir), "{}/test2.json".format(tmpdir))
        make_result_file(outdir=tmpdir, pesummary=True)
        command_line = (
            "summarycombine --webdir {0} "
            "--samples {0}/test.json {0}/test2.json "
            "--save_to_json".format(tmpdir)
        )
        if gw:
            command_line += " --gw"
        self.launch(command_line)

    def check_output(self, gw=False):
        """Check that the combined metafile contains every sample from each
        of the individual input files

        Parameters
        ----------
        gw: bool, optional
            if True, read the files with the gw specific reader. Default False
        """
        if gw:
            from pesummary.gw.file.read import read
        else:
            from pesummary.core.file.read import read

        assert os.path.isfile("{}/samples/posterior_samples.json".format(tmpdir))
        combined = read("{}/samples/posterior_samples.json".format(tmpdir))
        for f in ["{}/test.json".format(tmpdir), "{}/test2.json".format(tmpdir)]:
            data = read(f)
            labels = data.labels
            assert all(i in combined.labels for i in labels)
            # NOTE(review): the original comparison referenced an undefined
            # variable `i` (generator-expression variables do not leak) and
            # called `range()` on the samples array rather than its length,
            # so it raised as soon as it executed. The loop below compares,
            # for each analysis label, every sample of every parameter
            # between the input file and the combined file.
            for label in labels:
                for param, samples in data.samples_dict[label].items():
                    assert all(
                        samples[num] == combined.samples_dict[label][param][num]
                        for num in range(len(samples))
                    )
class TestCoreSummaryCombine_Metafiles(_SummaryCombine_Metafiles):
    """Test the `summarycombine_metafile` executable for core result files
    """
    def setup_method(self):
        """Create the result file used in each test
        """
        if not os.path.isdir(tmpdir):
            os.mkdir(tmpdir)
        make_result_file(outdir=tmpdir, pesummary=True)

    def teardown_method(self):
        """Delete all files and directories produced by this class
        """
        if os.path.isdir(tmpdir):
            shutil.rmtree(tmpdir)

    @pytest.mark.executabletest
    def test_combine(self):
        """Combine 2 metafiles without the `--gw` flag
        """
        super().test_combine(gw=False)

    def check_output(self):
        """Check the combined metafile with the core reader
        """
        super().check_output(gw=False)
class TestGWSummaryCombine_Metafiles(_SummaryCombine_Metafiles):
    """Test the `summarycombine_metafile` executable for GW result files
    """
    def setup_method(self):
        """Create the result file used in each test
        """
        if not os.path.isdir(tmpdir):
            os.mkdir(tmpdir)
        make_result_file(outdir=tmpdir, pesummary=True)

    def teardown_method(self):
        """Delete all files and directories produced by this class
        """
        if os.path.isdir(tmpdir):
            shutil.rmtree(tmpdir)

    @pytest.mark.executabletest
    def test_combine(self):
        """Combine 2 metafiles with the `--gw` flag
        """
        super().test_combine(gw=True)

    def check_output(self, gw=True):
        """Check the combined metafile with the GW reader
        """
        super().check_output(gw=True)
class TestSummaryCombine(Base):
    """Test the `summarycombine` executable
    """
    def setup_method(self):
        """Setup the SummaryCombine class
        """
        self.dirs = [tmpdir]
        for dd in self.dirs:
            if not os.path.isdir(dd):
                os.mkdir(dd)

    def teardown_method(self):
        """Remove the files and directories created from this class
        """
        for dd in self.dirs:
            if os.path.isdir(dd):
                shutil.rmtree(dd)

    @pytest.mark.executabletest
    def test_disable_prior_sampling(self):
        """Test that the code skips prior sampling when the appropiate flag
        is provided to the `summarypages` executable
        """
        from pesummary.io import read

        make_result_file(outdir=tmpdir, bilby=True, gw=False)
        os.rename("{}/test.json".format(tmpdir), "{}/bilby.json".format(tmpdir))
        command_line = (
            "summarycombine --webdir {0} --samples {0}/bilby.json "
            "--labels core0".format(tmpdir)
        )
        self.launch(command_line)
        # by default prior samples are drawn and stored in the metafile
        f = read("{}/samples/posterior_samples.h5".format(tmpdir))
        assert len(f.priors["samples"]["core0"])

        command_line = (
            "summarycombine --webdir {0} --samples {0}/bilby.json "
            "--disable_prior_sampling --labels core0".format(tmpdir)
        )
        self.launch(command_line)
        f = read("{}/samples/posterior_samples.h5".format(tmpdir))
        assert not len(f.priors["samples"]["core0"])

    @pytest.mark.executabletest
    def test_external_hdf5_links(self):
        """Test that seperate hdf5 files are made when the
        `--external_hdf5_links` command line is passed
        """
        from pesummary.gw.file.read import read
        from .base import make_psd, make_calibration

        make_result_file(outdir=tmpdir, gw=True, extension="json")
        os.rename("{}/test.json".format(tmpdir), "{}/example.json".format(tmpdir))
        make_psd(outdir=tmpdir)
        make_calibration(outdir=tmpdir)
        command_line = (
            "summarycombine --webdir {0} --samples "
            "{0}/example.json --label gw0 --external_hdf5_links --gw "
            "--psd H1:{0}/psd.dat --calibration L1:{0}/calibration.dat "
            "--no_conversion".format(tmpdir)
        )
        self.launch(command_line)
        assert os.path.isfile(
            os.path.join(tmpdir, "samples", "posterior_samples.h5")
        )
        assert os.path.isfile(
            os.path.join(tmpdir, "samples", "_gw0.h5")
        )
        # the external file must hold the same samples, psd and calibration
        # data as both the top-level metafile and the original input file
        f = read("{}/samples/posterior_samples.h5".format(tmpdir))
        g = read("{}/example.json".format(tmpdir))
        h = read("{}/samples/_gw0.h5".format(tmpdir))
        np.testing.assert_almost_equal(f.samples[0], g.samples)
        np.testing.assert_almost_equal(f.samples[0], h.samples[0])
        np.testing.assert_almost_equal(f.psd["gw0"]["H1"], h.psd["gw0"]["H1"])
        np.testing.assert_almost_equal(
            f.priors["calibration"]["gw0"]["L1"],
            h.priors["calibration"]["gw0"]["L1"]
        )

    @pytest.mark.executabletest
    def test_compression(self):
        """Test that the metafile is reduced in size when the datasets are
        compressed with maximum compression level
        """
        from pesummary.gw.file.read import read
        from .base import make_psd, make_calibration

        make_result_file(outdir=tmpdir, gw=True, extension="json")
        os.rename("{}/test.json".format(tmpdir), "{}/example.json".format(tmpdir))
        make_psd(outdir=tmpdir)
        make_calibration(outdir=tmpdir)
        command_line = (
            "summarycombine --webdir {0} --samples "
            "{0}/example.json --label gw0 --no_conversion --gw "
            "--psd H1:{0}/psd.dat --calibration L1:{0}/calibration.dat ".format(
                tmpdir
            )
        )
        self.launch(command_line)
        original_size = os.stat("{}/samples/posterior_samples.h5".format(tmpdir)).st_size
        command_line = (
            "summarycombine --webdir {0} --samples "
            "{0}/example.json --label gw0 --no_conversion --gw "
            "--psd H1:{0}/psd.dat --calibration L1:{0}/calibration.dat "
            "--hdf5_compression 9 --posterior_samples_filename "
            "posterior_samples2.h5".format(tmpdir)
        )
        self.launch(command_line)
        compressed_size = os.stat("{}/samples/posterior_samples2.h5".format(tmpdir)).st_size
        assert compressed_size < original_size

        # compression must not change the stored samples
        f = read("{}/samples/posterior_samples.h5".format(tmpdir))
        g = read("{}/samples/posterior_samples2.h5".format(tmpdir))
        posterior_samples = f.samples[0]
        posterior_samples2 = g.samples[0]
        np.testing.assert_almost_equal(posterior_samples, posterior_samples2)

    @pytest.mark.executabletest
    def test_seed(self):
        """Test that the samples stored in the metafile are identical for two
        runs if the random seed is the same
        """
        from pesummary.gw.file.read import read

        make_result_file(outdir=tmpdir, gw=True, extension="json")
        os.rename("{}/test.json".format(tmpdir), "{}/example.json".format(tmpdir))
        command_line = (
            "summarycombine --webdir {0} --samples "
            "{0}/example.json --label gw0 --no_conversion --gw "
            "--nsamples 10 --seed 1000".format(tmpdir)
        )
        self.launch(command_line)
        original = read("{}/samples/posterior_samples.h5".format(tmpdir))
        command_line = (
            "summarycombine --webdir {0} --samples "
            "{0}/example.json --label gw0 --no_conversion --gw "
            "--nsamples 10 --seed 2000".format(tmpdir)
        )
        self.launch(command_line)
        new = read("{}/samples/posterior_samples.h5".format(tmpdir))
        # different seeds must downsample to different subsets. The previous
        # implementation raised AssertionError("Failed") on equality but then
        # swallowed it with `except AssertionError: pass`, so the check could
        # never fail; pytest.raises asserts the comparison really does raise
        with pytest.raises(AssertionError):
            np.testing.assert_almost_equal(original.samples[0], new.samples[0])

        command_line = (
            "summarycombine --webdir {0} --samples "
            "{0}/example.json --label gw0 --no_conversion --gw "
            "--nsamples 10 --seed 1000".format(tmpdir)
        )
        self.launch(command_line)
        original = read("{}/samples/posterior_samples.h5".format(tmpdir))
        command_line = (
            "summarycombine --webdir {0} --samples "
            "{0}/example.json --label gw0 --no_conversion --gw "
            "--nsamples 10 --seed 1000".format(tmpdir)
        )
        self.launch(command_line)
        new = read("{}/samples/posterior_samples.h5".format(tmpdir))
        # identical seeds must reproduce identical samples
        np.testing.assert_almost_equal(
            original.samples[0], new.samples[0]
        )

    @pytest.mark.executabletest
    def test_preferred(self):
        """Test that the preferred analysis is correctly stored in the metafile
        """
        from pesummary.io import read
        make_result_file(gw=True, extension="json", outdir=tmpdir)
        make_result_file(gw=True, extension="hdf5", outdir=tmpdir)
        # no --preferred option: nothing stored
        command_line = (
            "summarycombine --webdir {0} --samples "
            "{0}/test.json {0}/test.h5 --label gw0 gw1 --no_conversion "
            "--gw --nsamples 10".format(tmpdir)
        )
        self.launch(command_line)
        f = read("{}/samples/posterior_samples.h5".format(tmpdir))
        assert f.preferred is None
        # a known label is stored as the preferred analysis
        command_line = (
            "summarycombine --webdir {0} --samples "
            "{0}/test.json {0}/test.h5 --label gw0 gw1 --no_conversion "
            "--gw --nsamples 10 --preferred gw1".format(tmpdir)
        )
        self.launch(command_line)
        f = read("{}/samples/posterior_samples.h5".format(tmpdir))
        assert f.preferred == "gw1"
        # an unknown label results in no preferred analysis being stored
        command_line = (
            "summarycombine --webdir {0} --samples "
            "{0}/test.json {0}/test.h5 --label gw0 gw1 --no_conversion "
            "--gw --nsamples 10 --preferred gw2".format(tmpdir)
        )
        self.launch(command_line)
        f = read("{}/samples/posterior_samples.h5".format(tmpdir))
        assert f.preferred is None
class TestSummaryReview(Base):
    """Tests for the `summaryreview` executable
    """
    def setup_method(self):
        """Create the lalinference result file used by the test
        """
        if not os.path.isdir(tmpdir):
            os.mkdir(tmpdir)
        make_result_file(outdir=tmpdir, lalinference=True)

    def teardown_method(self):
        """Delete all files and directories produced by this class
        """
        if os.path.isdir(tmpdir):
            shutil.rmtree(tmpdir)

    @pytest.mark.executabletest
    def test_review(self):
        """Test the `summaryreview` script for a `lalinference` result file
        """
        command_line = (
            f"summaryreview --webdir {tmpdir} --samples {tmpdir}/test.hdf5 "
            "--test core_plots"
        )
        self.launch(command_line)
class TestSummarySplit(Base):
    """Tests for the `summarysplit` executable
    """
    def setup_method(self):
        """Create the result files used by each test
        """
        if not os.path.isdir(tmpdir):
            os.mkdir(tmpdir)
        make_result_file(outdir=tmpdir, gw=False, extension="json")
        make_result_file(outdir=tmpdir, gw=False, extension="hdf5", n_samples=500)

    def teardown_method(self):
        """Delete all files and directories produced by this class
        """
        if os.path.isdir(tmpdir):
            shutil.rmtree(tmpdir)

    @pytest.mark.executabletest
    def test_split_single_analysis(self):
        """Test that a file containing a single analysis is successfully split
        into N_samples result files
        """
        from pesummary.io import read
        command_line = (
            "summarysplit --samples {0}/test.json --file_format json "
            "--outdir {0}/split".format(tmpdir)
        )
        self.launch(command_line)
        original = read("{}/test.json".format(tmpdir)).samples_dict
        split_files = glob.glob("{}/split/*.json".format(tmpdir))
        # one output file per posterior sample
        assert len(split_files) == original.number_of_samples
        for path in split_files:
            single = read(path).samples_dict
            assert single.number_of_samples == 1
            # the sample index is encoded in the file name
            idx = int(path.split("/")[-1].split("_")[-1].split(".")[0])
            for param in single.keys():
                assert single[param] == original[param][idx]
        labels = " ".join(
            str(num) for num in range(original.number_of_samples)
        )
        command_line = (
            "summarycombine_posteriors --use_all --samples {0} "
            "--outdir {1} --filename combined_split.dat "
            "--file_format dat --labels {2}".format(
                " ".join(split_files), tmpdir, labels
            )
        )
        self.launch(command_line)
        # recombining the split files must reproduce the original samples
        combined = read("{}/combined_split.dat".format(tmpdir)).samples_dict
        assert all(param in original.keys() for param in combined.keys())
        for param in original.keys():
            assert all(sample in combined[param] for sample in original[param])
            assert all(sample in original[param] for sample in combined[param])

    @pytest.mark.executabletest
    def test_split_single_analysis_specific_N_files(self):
        """Test that a file containing a single analysis is successfully split
        into 10 result files
        """
        from pesummary.io import read
        command_line = (
            "summarysplit --samples {0}/test.json --file_format json "
            "--outdir {0}/split --N_files 10".format(tmpdir)
        )
        self.launch(command_line)
        original = read("{}/test.json".format(tmpdir)).samples_dict
        split_files = glob.glob("{}/split/*.json".format(tmpdir))
        assert len(split_files) == 10
        for path in split_files:
            part = read(path).samples_dict
            for param in part.keys():
                assert all(sample in original[param] for sample in part[param])

    @pytest.mark.executabletest
    def test_split_multi_analysis(self):
        """Test that a file containing multiple analyses is successfully split
        into N_samples result files
        """
        from pesummary.io import read
        command_line = (
            "summarycombine --webdir {0} --samples {0}/test.json "
            "{0}/test.h5 --labels one two".format(tmpdir)
        )
        self.launch(command_line)
        command_line = (
            "summarysplit --samples {0}/samples/posterior_samples.h5 "
            "--file_format hdf5 --outdir {0}/split".format(tmpdir)
        )
        self.launch(command_line)
        # one sub-directory per analysis label
        assert os.path.isdir("{}/split/one".format(tmpdir))
        assert os.path.isdir("{}/split/two".format(tmpdir))
        sources = {
            "one": "{}/test.json".format(tmpdir),
            "two": "{}/test.h5".format(tmpdir)
        }
        for analysis, source in sources.items():
            original = read(source).samples_dict
            split_files = glob.glob("{}/split/{}/*.hdf5".format(tmpdir, analysis))
            assert len(split_files) == original.number_of_samples
            for path in split_files:
                single = read(path).samples_dict
                assert single.number_of_samples == 1
                idx = int(path.split("/")[-1].split("_")[-1].split(".")[0])
                for param in single.keys():
                    assert single[param] == original[param][idx]
class TestSummaryExtract(Base):
    """Tests for the `summaryextract` executable
    """
    def setup_method(self):
        """Create the result files used by each test
        """
        if not os.path.isdir(tmpdir):
            os.mkdir(tmpdir)
        make_result_file(outdir=tmpdir, gw=False, extension="json")
        os.rename("{}/test.json".format(tmpdir), "{}/example.json".format(tmpdir))
        make_result_file(outdir=tmpdir, gw=False, extension="hdf5")
        os.rename("{}/test.h5".format(tmpdir), "{}/example2.h5".format(tmpdir))

    def teardown_method(self):
        """Delete all files and directories produced by this class
        """
        if os.path.isdir(tmpdir):
            shutil.rmtree(tmpdir)

    @pytest.mark.executabletest
    def test_extract(self):
        """Test that a set of posterior samples is correctly extracted from
        a metafile
        """
        from pesummary.io import read
        command_line = (
            "summarycombine --samples {0}/example.json {0}/example2.h5 "
            "--labels one two --webdir {0}".format(tmpdir)
        )
        self.launch(command_line)
        # extract analysis `one` as a dat file
        command_line = (
            "summaryextract --outdir {0} --filename one.dat --file_format dat "
            "--samples {0}/samples/posterior_samples.h5 --label one".format(tmpdir)
        )
        self.launch(command_line)
        assert os.path.isfile("{}/one.dat".format(tmpdir))
        extracted = read("{}/one.dat".format(tmpdir)).samples_dict
        original = read("{}/example.json".format(tmpdir)).samples_dict
        assert all(param in extracted.keys() for param in original.keys())
        np.testing.assert_almost_equal(extracted.samples, original.samples)
        # extract analysis `one` as a pesummary metafile
        command_line = (
            "summaryextract --outdir {0} --filename one.h5 --label one "
            "--file_format pesummary "
            "--samples {0}/samples/posterior_samples.h5 ".format(tmpdir)
        )
        self.launch(command_line)
        assert os.path.isfile("{}/one.h5".format(tmpdir))
        extracted = read("{}/one.h5".format(tmpdir)).samples_dict
        assert "dataset" in extracted.keys()
        assert all(param in extracted["dataset"].keys() for param in original.keys())
        np.testing.assert_almost_equal(extracted["dataset"].samples, original.samples)
class TestSummaryCombine_Posteriors(Base):
    """Test the `summarycombine_posteriors` executable
    """
    def setup_method(self):
        """Setup the SummaryCombine_Posteriors class
        """
        if not os.path.isdir(tmpdir):
            os.mkdir(tmpdir)
        # three GW result files in different formats: json, hdf5 and dat
        make_result_file(outdir=tmpdir, gw=True, extension="json")
        os.rename(f"{tmpdir}/test.json", f"{tmpdir}/example.json")
        make_result_file(outdir=tmpdir, gw=True, extension="hdf5")
        os.rename(f"{tmpdir}/test.h5", f"{tmpdir}/example2.h5")
        make_result_file(outdir=tmpdir, gw=True, extension="dat")
        os.rename(f"{tmpdir}/test.dat", f"{tmpdir}/example3.dat")

    def teardown_method(self):
        """Remove the files and directories created from this class
        """
        if os.path.isdir(tmpdir):
            shutil.rmtree(tmpdir)

    @pytest.mark.executabletest
    def test_combine(self):
        """Test that the two posteriors are combined
        """
        from pesummary.io import read
        command_line = (
            f"summarycombine_posteriors --outdir {tmpdir} --filename test.dat "
            f"--file_format dat --samples {tmpdir}/example.json {tmpdir}/example2.h5 "
            "--labels one two --weights 0.5 0.5 --seed 12345"
        )
        self.launch(command_line)
        assert os.path.isfile(f"{tmpdir}/test.dat")
        combined = read(f"{tmpdir}/test.dat").samples_dict
        one = read(f"{tmpdir}/example.json").samples_dict
        two = read(f"{tmpdir}/example2.h5").samples_dict
        nsamples = combined.number_of_samples
        half = int(nsamples / 2.)
        # with equal weights, the first half of the combined samples should
        # come from `one` and the second half from `two`
        for param in combined.keys():
            assert all(ss in one[param] for ss in combined[param][:half])
            assert all(ss in two[param] for ss in combined[param][half:])

    @pytest.mark.executabletest
    def test_combine_metafile_failures(self):
        """Test that errors are raised when incorrect labels are passed when
        trying to combine posteriors from a single metafile and when trying
        to combine posteriors from multiple metafiles
        """
        command_line = (
            f"summarycombine --samples {tmpdir}/example.json {tmpdir}/example2.h5 "
            f"{tmpdir}/example3.dat --labels one two three --webdir {tmpdir} "
            "--no_conversion"
        )
        self.launch(command_line)
        # label `four` does not exist in the metafile (only one/two/three)
        with pytest.raises(Exception):
            command_line = (
                f"summarycombine_posteriors --outdir {tmpdir} --filename test.dat "
                f"--file_format dat --samples {tmpdir}/samples/posterior_samples.h5 "
                "--labels one four --weights 0.5 0.5 --seed 12345"
            )
            self.launch(command_line)
        # passing more than one metafile should fail
        with pytest.raises(Exception):
            command_line = (
                f"summarycombine_posteriors --outdir {tmpdir} --filename test.dat "
                f"--file_format dat --samples {tmpdir}/samples/posterior_samples.h5 "
                f"{tmpdir}/samples/posterior_samples.h5 --labels one two "
                "--weights 0.5 0.5 --seed 12345"
            )
            self.launch(command_line)
        # mixing a metafile with an ordinary result file should also fail
        with pytest.raises(Exception):
            command_line = (
                f"summarycombine_posteriors --outdir {tmpdir} --filename test.dat "
                f"--file_format dat --samples {tmpdir}/samples/posterior_samples.h5 "
                f"{tmpdir}/example3.dat --labels one two --weights 0.5 0.5 --seed 12345"
            )
            self.launch(command_line)

    @pytest.mark.executabletest
    def test_combine_metafile(self):
        """Test that the two posteriors are combined when a single metafile
        is provided
        """
        from pesummary.io import read
        command_line = (
            f"summarycombine --samples {tmpdir}/example.json {tmpdir}/example2.h5 "
            f"{tmpdir}/example3.dat --labels one two three --webdir {tmpdir} "
            "--no_conversion"
        )
        self.launch(command_line)
        command_line = (
            f"summarycombine_posteriors --outdir {tmpdir} --filename test.dat "
            f"--file_format dat --samples {tmpdir}/samples/posterior_samples.h5 "
            "--labels one two --weights 0.5 0.5 --seed 12345"
        )
        self.launch(command_line)
        assert os.path.isfile(f"{tmpdir}/test.dat")
        combined = read(f"{tmpdir}/test.dat").samples_dict
        one = read(f"{tmpdir}/example.json").samples_dict
        two = read(f"{tmpdir}/example2.h5").samples_dict
        nsamples = combined.number_of_samples
        half = int(nsamples / 2.)
        # with equal weights, the first half of the combined samples should
        # come from `one` and the second half from `two`
        for param in combined.keys():
            assert all(ss in one[param] for ss in combined[param][:half])
            assert all(ss in two[param] for ss in combined[param][half:])

        # test that you add the samples to the original file
        command_line = (
            f"summarycombine_posteriors --outdir {tmpdir} --filename test.h5 "
            f"--file_format dat --samples {tmpdir}/samples/posterior_samples.h5 "
            "--labels one two --weights 0.5 0.5 --seed 12345 --add_to_existing"
        )
        self.launch(command_line)
        assert os.path.isfile(f"{tmpdir}/test.h5")
        combined = read(f"{tmpdir}/test.h5")
        combined_samples = combined.samples_dict
        # the combined analysis is stored under a new label in the metafile
        assert "one_two_combined" in combined.labels
        assert "one_two_combined" in combined_samples.keys()
        combined_samples = combined_samples["one_two_combined"]
        for param in combined_samples.keys():
            assert all(ss in one[param] for ss in combined_samples[param][:half])
            assert all(ss in two[param] for ss in combined_samples[param][half:])
        # check that summarypages works fine on output
        command_line = (
            f"summarypages --webdir {tmpdir}/combined "
            f" --no_conversion --samples {tmpdir}/test.h5 "
            "--disable_corner --disable_interactive --gw"
        )
        self.launch(command_line)
        assert os.path.isfile(f"{tmpdir}/combined/samples/posterior_samples.h5")
        output = read(f"{tmpdir}/combined/samples/posterior_samples.h5")
        assert "one_two_combined" in output.labels
1748class TestSummaryModify(Base):
1749 """Test the `summarymodify` executable
1750 """
1751 def setup_method(self):
1752 """Setup the SummaryModify class
1753 """
1754 if not os.path.isdir(tmpdir):
1755 os.mkdir(tmpdir)
1756 make_result_file(
1757 pesummary=True, pesummary_label="replace", extension="hdf5",
1758 outdir=tmpdir
1759 )
1761 def teardown_method(self):
1762 """Remove the files and directories created from this class
1763 """
1764 if os.path.isdir(tmpdir):
1765 shutil.rmtree(tmpdir)
1767 @pytest.mark.executabletest
1768 def test_preferred(self):
1769 """Test that the preferred run is correctly specified in the meta file
1770 """
1771 from pesummary.io import read
1772 make_result_file(extension="json", bilby=True, gw=True, outdir=tmpdir)
1773 make_result_file(extension="dat", gw=True, outdir=tmpdir)
1774 command_line = (
1775 "summarycombine --webdir {0} --samples {0}/test.json "
1776 "{0}/test.dat --no_conversion --gw --labels one two "
1777 "--nsamples 100".format(
1778 tmpdir
1779 )
1780 )
1781 self.launch(command_line)
1782 f = read("{}/samples/posterior_samples.h5".format(tmpdir))
1783 assert f.preferred is None
1784 command_line = (
1785 "summarymodify --samples {0}/samples/posterior_samples.h5 "
1786 "--webdir {0} --preferred two".format(tmpdir)
1787 )
1788 self.launch(command_line)
1789 f = read("{0}/modified_posterior_samples.h5".format(tmpdir))
1790 assert f.preferred == "two"
1792 @pytest.mark.executabletest
1793 def test_descriptions(self):
1794 """Test that the descriptions are correctly replaced in the meta file
1795 """
1796 import json
1797 import h5py
1799 command_line = (
1800 'summarymodify --webdir {0} --samples {0}/test.h5 '
1801 '--descriptions replace:TestingSummarymodify'.format(tmpdir)
1802 )
1803 self.launch(command_line)
1804 modified_data = h5py.File(
1805 "{}/modified_posterior_samples.h5".format(tmpdir), "r"
1806 )
1807 original_data = h5py.File("{}/test.h5".format(tmpdir), "r")
1808 data = h5py.File("{}/test.h5".format(tmpdir), "r")
1809 if "description" in original_data["replace"].keys():
1810 assert original_data["replace"]["description"][0] != b'TestingSummarymodify'
1811 assert modified_data["replace"]["description"][0] == b'TestingSummarymodify'
1812 modified_data.close()
1813 original_data.close()
1815 with open("{}/descriptions.json".format(tmpdir), "w") as f:
1816 json.dump({"replace": "NewDescription"}, f)
1818 command_line = (
1819 'summarymodify --webdir {0} --samples {0}/test.h5 '
1820 '--descriptions {0}/descriptions.json'.format(tmpdir)
1821 )
1822 self.launch(command_line)
1823 modified_data = h5py.File(
1824 "{}/modified_posterior_samples.h5".format(tmpdir), "r"
1825 )
1826 assert modified_data["replace"]["description"][0] == b'NewDescription'
1827 modified_data.close()
    @pytest.mark.executabletest
    def test_modify_config(self):
        """Test that the config file is correctly replaced in the meta file
        """
        import configparser
        import h5py
        user = getuser()
        config = configparser.ConfigParser()
        # keep option names case-sensitive; the default optionxform lowercases
        config.optionxform = str
        config.read(data_dir + "/config_lalinference.ini")
        # NOTE(review): relies on the private `_sections` attribute of
        # ConfigParser to obtain a plain dict view of the parsed config;
        # mutating the nested section dicts also mutates `config` itself
        config_dictionary = dict(config._sections)
        config_dictionary["paths"]["webdir"] = (
            "./{}/webdir".format(user)
        )
        # store the config alongside the samples in the metafile
        make_result_file(
            pesummary=True, pesummary_label="replace", extension="hdf5",
            config=config_dictionary, outdir=tmpdir
        )
        # confirm the metafile contains the original webdir entry
        f = h5py.File("{}/test.h5".format(tmpdir), "r")
        assert f["replace"]["config_file"]["paths"]["webdir"][0] == (
            bytes("./{}/webdir".format(user), "utf-8")
        )
        f.close()
        # build a replacement config with a different webdir and write it out
        config.read(data_dir + "/config_lalinference.ini")
        config_dictionary = dict(config._sections)
        config_dictionary["paths"]["webdir"] = "./replace/webdir"
        with open('{}/replace_config.ini'.format(tmpdir), 'w') as configfile:
            config.write(configfile)
        command_line = (
            "summarymodify --webdir {0} --samples {0}/test.h5 "
            "--config replace:{0}/replace_config.ini".format(tmpdir)
        )
        self.launch(command_line)
        # the modified metafile should now hold the replacement webdir only
        f = h5py.File("{}/modified_posterior_samples.h5".format(tmpdir), "r")
        assert f["replace"]["config_file"]["paths"]["webdir"][0] != (
            bytes("./{}/webdir".format(user), "utf-8")
        )
        assert f["replace"]["config_file"]["paths"]["webdir"][0] == (
            bytes("./replace/webdir", "utf-8")
        )
        f.close()
1871 @pytest.mark.executabletest
1872 def test_modify_kwargs_replace(self):
1873 """Test that kwargs are correctly replaced in the meta file
1874 """
1875 import h5py
1877 command_line = (
1878 "summarymodify --webdir {0} --samples {0}/test.h5 "
1879 "--delimiter / --kwargs replace/log_evidence:1000".format(
1880 tmpdir
1881 )
1882 )
1883 self.launch(command_line)
1884 modified_data = h5py.File(
1885 "{}/modified_posterior_samples.h5".format(tmpdir), "r"
1886 )
1887 original_data = h5py.File("{}/test.h5".format(tmpdir), "r")
1888 data = h5py.File("{}/test.h5".format(tmpdir), "r")
1889 assert original_data["replace"]["meta_data"]["sampler"]["log_evidence"][0] != b'1000'
1890 assert modified_data["replace"]["meta_data"]["sampler"]["log_evidence"][0] == b'1000'
1891 modified_data.close()
1892 original_data.close()
1894 @pytest.mark.executabletest
1895 def test_modify_kwargs_append(self):
1896 """Test that kwargs are correctly added to the result file
1897 """
1898 import h5py
1900 original_data = h5py.File("{}/test.h5".format(tmpdir), "r")
1901 assert "other" not in original_data["replace"]["meta_data"].keys()
1902 original_data.close()
1903 command_line = (
1904 "summarymodify --webdir {0} --samples {0}/test.h5 "
1905 "--delimiter / --kwargs replace/test:10 "
1906 "--overwrite".format(tmpdir)
1907 )
1908 self.launch(command_line)
1909 modified_data = h5py.File("{}/test.h5".format(tmpdir), "r")
1910 assert modified_data["replace"]["meta_data"]["other"]["test"][0] == b'10'
1911 modified_data.close()
1913 @pytest.mark.executabletest
1914 def test_modify_posterior(self):
1915 """Test that a posterior distribution is correctly modified
1916 """
1917 import h5py
1919 new_posterior = np.random.uniform(10, 0.5, 1000)
1920 np.savetxt("{}/different_posterior.dat".format(tmpdir), new_posterior)
1921 command_line = (
1922 "summarymodify --webdir {0} --samples {0}/test.h5 --delimiter ; "
1923 "--replace_posterior replace;mass_1:{0}/different_posterior.dat".format(
1924 tmpdir
1925 )
1926 )
1927 self.launch(command_line)
1928 modified_data = h5py.File(
1929 "{}/modified_posterior_samples.h5".format(tmpdir), "r"
1930 )
1931 np.testing.assert_almost_equal(
1932 modified_data["replace"]["posterior_samples"]["mass_1"], new_posterior
1933 )
1934 modified_data.close()
1935 command_line = (
1936 "summarymodify --webdir {0} --samples {0}/test.h5 --delimiter ; "
1937 "--replace_posterior replace;abc:{0}/different_posterior.dat".format(
1938 tmpdir
1939 )
1940 )
1941 self.launch(command_line)
1942 modified_data = h5py.File(
1943 "{}/modified_posterior_samples.h5".format(tmpdir), "r"
1944 )
1945 np.testing.assert_almost_equal(
1946 modified_data["replace"]["posterior_samples"]["abc"], new_posterior
1947 )
1948 modified_data.close()
1950 @pytest.mark.executabletest
1951 def test_remove_label(self):
1952 """Test that an analysis is correctly removed
1953 """
1954 from pesummary.io import read
1955 make_result_file(gw=True, extension="json", outdir=tmpdir)
1956 os.rename(
1957 "{}/test.json".format(tmpdir), "{}/example.json".format(tmpdir)
1958 )
1959 make_result_file(gw=True, extension="hdf5", outdir=tmpdir)
1960 os.rename(
1961 "{}/test.h5".format(tmpdir), "{}/example2.h5".format(tmpdir)
1962 )
1963 make_result_file(gw=True, extension="dat", outdir=tmpdir)
1964 os.rename(
1965 "{}/test.dat".format(tmpdir), "{}/example3.dat".format(tmpdir)
1966 )
1967 command_line = (
1968 "summarycombine --samples {0}/example.json {0}/example2.h5 "
1969 "{0}/example3.dat --labels one two three --webdir {0} "
1970 "--no_conversion".format(tmpdir)
1971 )
1972 self.launch(command_line)
1973 original = read("{}/samples/posterior_samples.h5".format(tmpdir))
1974 assert all(label in original.labels for label in ["one", "two", "three"])
1975 command_line = (
1976 "summarymodify --samples {0}/samples/posterior_samples.h5 "
1977 "--remove_label one --webdir {0}".format(tmpdir)
1978 )
1979 self.launch(command_line)
1980 f = read("{}/modified_posterior_samples.h5".format(tmpdir))
1981 assert "one" not in f.labels
1982 assert all(label in f.labels for label in ["two", "three"])
1983 _original_samples = original.samples_dict
1984 _samples = f.samples_dict
1985 for label in ["two", "three"]:
1986 np.testing.assert_almost_equal(
1987 _original_samples[label].samples, _samples[label].samples
1988 )
1989 command_line = (
1990 "summarymodify --samples {0}/samples/posterior_samples.h5 "
1991 "--remove_label example --webdir {0}".format(tmpdir)
1992 )
1993 f = read("{}/modified_posterior_samples.h5".format(tmpdir))
1994 assert "one" not in f.labels
1995 assert all(label in f.labels for label in ["two", "three"])
1997 @pytest.mark.executabletest
1998 def test_remove_posterior(self):
1999 """Test that a posterior is correctly removed
2000 """
2001 import h5py
2003 command_line = (
2004 "summarymodify --webdir {0} --samples {0}/test.h5 --delimiter ; "
2005 "--remove_posterior replace;mass_1".format(tmpdir)
2006 )
2007 self.launch(command_line)
2008 original_data = h5py.File("{}/test.h5".format(tmpdir), "r")
2009 params = list(original_data["replace"]["posterior_samples"]["parameter_names"])
2010 if isinstance(params[0], bytes):
2011 params = [param.decode("utf-8") for param in params]
2012 assert "mass_1" in params
2013 modified_data = h5py.File(
2014 "{}/modified_posterior_samples.h5".format(tmpdir), "r"
2015 )
2016 assert "mass_1" not in modified_data["replace"]["posterior_samples"].dtype.names
2017 original_data.close()
2018 modified_data.close()
2020 @pytest.mark.executabletest
2021 def test_remove_multiple_posteriors(self):
2022 """Test that multiple posteriors are correctly removed
2023 """
2024 import h5py
2026 command_line = (
2027 "summarymodify --webdir {0} --samples {0}/test.h5 --delimiter ; "
2028 "--remove_posterior replace;mass_1 replace;mass_2".format(
2029 tmpdir
2030 )
2031 )
2032 self.launch(command_line)
2033 original_data = h5py.File("{}/test.h5".format(tmpdir), "r")
2034 params = list(original_data["replace"]["posterior_samples"]["parameter_names"])
2035 if isinstance(params[0], bytes):
2036 params = [param.decode("utf-8") for param in params]
2037 assert "mass_1" in params
2038 assert "mass_2" in params
2039 modified_data = h5py.File(
2040 "{}/modified_posterior_samples.h5".format(tmpdir), "r"
2041 )
2042 assert "mass_1" not in modified_data["replace"]["posterior_samples"].dtype.names
2043 assert "mass_2" not in modified_data["replace"]["posterior_samples"].dtype.names
2044 original_data.close()
2045 modified_data.close()
    @pytest.mark.executabletest
    def test_store_skymap(self):
        """Test that multiple skymaps are correctly stored
        """
        import astropy_healpix as ah
        from ligo.skymap.io.fits import write_sky_map
        import h5py

        # build a normalised random probability map and write it to FITS
        nside = 128
        npix = ah.nside_to_npix(nside)
        prob = np.random.random(npix)
        prob /= sum(prob)

        write_sky_map(
            '{}/test.fits'.format(tmpdir), prob,
            objid='FOOBAR 12345',
            gps_time=10494.3,
            creator="test",
            origin='LIGO Scientific Collaboration',
        )
        # store the skymap in the meta file under the 'replace' analysis
        command_line = (
            "summarymodify --webdir {0} --samples {0}/test.h5 "
            "--store_skymap replace:{0}/test.fits".format(tmpdir)
        )
        self.launch(command_line)
        modified_data = h5py.File(
            "{}/modified_posterior_samples.h5".format(tmpdir), "r"
        )
        # both the probability data and the FITS metadata should be stored
        assert "skymap" in modified_data["replace"].keys()
        np.testing.assert_almost_equal(
            modified_data["replace"]["skymap"]["data"], prob
        )
        np.testing.assert_almost_equal(
            modified_data["replace"]["skymap"]["meta_data"]["gps_time"][0], 10494.3
        )
        _creator = modified_data["replace"]["skymap"]["meta_data"]["creator"][0]
        # stored strings may come back as bytes depending on the h5py version
        if isinstance(_creator, bytes):
            _creator = _creator.decode("utf-8")
        assert _creator == "test"

        # replacing an already-stored skymap succeeds with --force_replace
        command_line = (
            "summarymodify --webdir {0} "
            "--samples {0}/modified_posterior_samples.h5 "
            "--store_skymap replace:{0}/test.fits --force_replace".format(
                tmpdir
            )
        )
        self.launch(command_line)
        # the stored skymap should survive a full `summarypages` run
        command_line = (
            "summarypages --webdir {0}/webpage --gw --no_conversion "
            "--samples {0}/modified_posterior_samples.h5 ".format(tmpdir)
        )
        self.launch(command_line)
        data = h5py.File(
            "{}/webpage/samples/posterior_samples.h5".format(tmpdir), "r"
        )
        np.testing.assert_almost_equal(data["replace"]["skymap"]["data"], prob)
        data.close()
        # without --force_replace, overwriting an existing skymap must raise
        with pytest.raises(ValueError):
            command_line = (
                "summarymodify --webdir {0} "
                "--samples {0}/modified_posterior_samples.h5 "
                "--store_skymap replace:{0}/test.fits".format(tmpdir)
            )
            self.launch(command_line)
    @pytest.mark.executabletest
    def test_modify(self):
        """Test the `summarymodify` script
        """
        import h5py

        # rename the analysis 'replace' to 'new' in the output meta file
        command_line = (
            "summarymodify --webdir {0} --samples {0}/test.h5 "
            "--labels replace:new".format(tmpdir)
        )
        self.launch(command_line)
        modified_data = h5py.File(
            "{}/modified_posterior_samples.h5".format(tmpdir), "r"
        )
        data = h5py.File("{}/test.h5".format(tmpdir), "r")
        assert "replace" not in list(modified_data.keys())
        assert "new" in list(modified_data.keys())
        # everything stored under the old label should be carried over
        # unchanged under the new label
        for key in data["replace"].keys():
            assert key in modified_data["new"].keys()
            for i, j in zip(data["replace"][key], modified_data["new"][key]):
                try:
                    if isinstance(data["replace"][key][i],h5py._hl.dataset.Dataset):
                        try:
                            # datasets of scalars: compare entry by entry
                            assert all(k == l for k, l in zip(
                                data["replace"][key][i],
                                modified_data["new"][key][j]
                            ))
                        except ValueError:
                            # datasets of arrays: the scalar comparison above
                            # raises ValueError, so compare element-wise
                            # within each entry instead
                            assert all(
                                all(m == n for m, n in zip(k, l)) for k, l in zip(
                                    data["replace"][key][i],
                                    modified_data["new"][key][j]
                                )
                            )
                except TypeError:
                    # NOTE(review): members that cannot be indexed by `i`
                    # (presumably non-group/non-string entries) raise
                    # TypeError and are deliberately skipped — confirm this
                    # is the intended coverage
                    pass
        data.close()
        modified_data.close()
class TestSummaryRecreate(Base):
    """Test the `summaryrecreate` executable
    """
    def setup_method(self):
        """Create the meta file (with an embedded lalinference config) and
        the GW150914.txt trigger-time file used by the tests
        """
        import configparser

        if not os.path.isdir(tmpdir):
            os.mkdir(tmpdir)
        config = configparser.ConfigParser()
        # keep option names case-sensitive (default optionxform lower-cases)
        config.optionxform = str
        config.read(data_dir + "/config_lalinference.ini")
        config_dictionary = dict(config._sections)
        config_dictionary["paths"]["webdir"] = (
            "./{}/webdir".format(getuser())
        )
        make_result_file(
            pesummary=True, pesummary_label="recreate", extension="hdf5",
            config=config_dictionary, outdir=tmpdir
        )
        with open("GW150914.txt", "w") as f:
            f.writelines(["115"])

    def teardown_method(self):
        """Remove the files and directories created from this class
        """
        if os.path.isdir(tmpdir):
            shutil.rmtree(tmpdir)
        # BUGFIX: setup_method writes 'GW150914.txt' into the CWD and
        # test_recreate creates '{tmpdir}_modify'; neither was previously
        # removed, leaking artefacts into the working directory
        if os.path.isfile("GW150914.txt"):
            os.remove("GW150914.txt")
        if os.path.isdir("{}_modify".format(tmpdir)):
            shutil.rmtree("{}_modify".format(tmpdir))

    @pytest.mark.executabletest
    def test_recreate(self):
        """Test the `summaryrecreate` script
        """
        import configparser

        command_line = (
            "summaryrecreate --rundir {0} --samples {0}/test.h5 ".format(
                tmpdir
            )
        )
        self.launch(command_line)
        # the recreated run directory layout should be in place
        assert os.path.isdir(os.path.join(tmpdir, "recreate"))
        assert os.path.isfile(os.path.join(tmpdir, "recreate", "config.ini"))
        assert os.path.isdir(os.path.join(tmpdir, "recreate", "outdir"))
        assert os.path.isdir(os.path.join(tmpdir, "recreate", "outdir", "caches"))
        config = configparser.ConfigParser()
        config.read(os.path.join(tmpdir, "recreate", "config.ini"))
        original_config = configparser.ConfigParser()
        original_config.read(data_dir + "/config_lalinference.ini")
        for a, b in zip(
            sorted(config.sections()), sorted(original_config.sections())
        ):
            assert a == b
            for key, item in config[a].items():
                # NOTE(review): with a == b this compares the recreated
                # config with itself and is trivially true; comparing against
                # original_config here fails on options summaryrecreate
                # legitimately rewrites (e.g. webdir) — left as-is, confirm
                # intent
                assert config[b][key] == item
        # rerun with config overrides and check they are applied
        command_line = (
            "summaryrecreate --rundir {0}_modify --samples {0}/test.h5 "
            "--config_override approx:IMRPhenomPv3HM srate:4096".format(
                tmpdir
            )
        )
        self.launch(command_line)
        config = configparser.ConfigParser()
        config.read(os.path.join("{}_modify".format(tmpdir), "recreate", "config.ini"))
        original_config = configparser.ConfigParser()
        original_config.read(data_dir + "/config_lalinference.ini")
        for a, b in zip(
            sorted(config.sections()), sorted(original_config.sections())
        ):
            assert a == b
            for key, item in config[a].items():
                if key == "approx":
                    # overridden on the command line
                    assert original_config[b][key] != item
                    assert config[b][key] == "IMRPhenomPv3HM"
                elif key == "srate":
                    # overridden on the command line
                    assert original_config[b][key] != item
                    assert config[b][key] == "4096"
                elif key == "webdir":
                    # webdir is rewritten by the tooling; skip it
                    pass
                else:
                    assert original_config[b][key] == item
class TestSummaryCompare(Base):
    """Test the SummaryCompare executable
    """
    def setup_method(self):
        """Create the temporary directory used by the tests
        """
        if not os.path.isdir(tmpdir):
            os.mkdir(tmpdir)

    def teardown_method(self):
        """Remove the files and directories created from this class
        """
        if os.path.isdir(tmpdir):
            shutil.rmtree(tmpdir)

    @pytest.mark.executabletest
    def test_example_in_docs(self):
        """Test that the code runs for the example in the docs
        """
        from pesummary.io import write

        # FIX: removed a redundant local `import numpy as np` which only
        # shadowed the module-level numpy import
        parameters = ["a", "b", "c", "d"]
        data = np.random.random([100, 4])
        write(
            parameters, data, file_format="dat", outdir=tmpdir,
            filename="example1.dat"
        )
        # second file has an extra parameter so the comparison is non-trivial
        parameters2 = ["a", "b", "c", "d", "e"]
        data2 = np.random.random([100, 5])
        write(
            parameters2, data2, file_format="json", outdir=tmpdir,
            filename="example2.json"
        )
        command_line = (
            "summarycompare --samples {0}/example1.dat "
            "{0}/example2.json --properties_to_compare posterior_samples "
            "-v --generate_comparison_page --webdir {0}".format(
                tmpdir
            )
        )
        self.launch(command_line)
class TestSummaryJSCompare(Base):
    """Test the `summaryjscompare` executable
    """
    def setup_method(self):
        """Create the temporary directories used by each test
        """
        self.dirs = [tmpdir]
        for directory in self.dirs:
            if not os.path.isdir(directory):
                os.mkdir(directory)

    def teardown_method(self):
        """Remove the files and directories created from this class
        """
        for directory in self.dirs:
            if os.path.isdir(directory):
                shutil.rmtree(directory)

    @pytest.mark.executabletest
    def test_runs_on_core_file(self):
        """Test that the code successfully generates a plot for 2 core result files
        """
        # build two bilby (non-GW) result files to compare with one another
        for target in ["bilby.json", "bilby2.json"]:
            make_result_file(outdir=tmpdir, bilby=True, gw=False)
            os.rename(
                "{}/test.json".format(tmpdir),
                "{}/{}".format(tmpdir, target)
            )
        command_line = (
            "summaryjscompare --event test-bilby1-bilby2 --main_keys a b c d "
            "--webdir {0} --samples {0}/bilby.json "
            "{0}/bilby2.json --labels bilby1 bilby2".format(tmpdir)
        )
        self.launch(command_line)

    @pytest.mark.executabletest
    def test_runs_on_gw_file(self):
        """Test that the code successfully generates a plot for 2 gw result files
        """
        # compare a bilby GW file against a lalinference file
        make_result_file(outdir=tmpdir, bilby=True, gw=True)
        os.rename(
            "{}/test.json".format(tmpdir), "{}/bilby.json".format(tmpdir)
        )
        make_result_file(outdir=tmpdir, lalinference=True)
        os.rename(
            "{}/test.hdf5".format(tmpdir),
            "{}/lalinference.hdf5".format(tmpdir)
        )
        command_line = (
            "summaryjscompare --event test-bilby-lalinf --main_keys mass_1 "
            "mass_2 a_1 a_2 --webdir {0} --samples {0}/bilby.json "
            "{0}/lalinference.hdf5 --labels bilby lalinf".format(
                tmpdir
            )
        )
        self.launch(command_line)