Coverage for pesummary/tests/executable_test.py: 98.7%
1187 statements
« prev ^ index » next — coverage.py v7.4.4, created at 2024-05-02 08:42 +0000
1# License under an MIT style license -- see LICENSE.md
3import os
4import shutil
5import glob
6import subprocess
7from getpass import getuser
8import numpy as np
10from .base import (
11 make_result_file, get_list_of_plots, get_list_of_files, data_dir
12)
13import pytest
14from pesummary.utils.exceptions import InputError
15import importlib
16import tempfile
17from pathlib import Path
# Basename of a hidden scratch directory inside the CWD (e.g. ".tmpXXXX").
# NOTE(review): only the *name* is kept — the TemporaryDirectory object is
# discarded immediately, so the directory it made is presumably cleaned up by
# its finalizer; each test class creates/removes the directory itself in its
# setup_method/teardown_method.
tmpdir = Path(tempfile.TemporaryDirectory(prefix=".", dir=".").name).name

__author__ = ["Charlie Hoy <charlie.hoy@ligo.org>"]
class Base(object):
    """Base class for testing the executables
    """
    def launch(self, command_line):
        """Run a pesummary executable in-process and return its result

        Parameters
        ----------
        command_line: str
            the full command line to execute, e.g. 'summarypages --webdir dir'
        """
        tokens = command_line.split(" ")
        # the first token names the pesummary.cli module to run
        module = importlib.import_module("pesummary.cli.{}".format(tokens[0]))
        cla = tokens[1:]
        # keep this print: some tests index past this line in captured stdout
        print(cla)
        return module.main(args=[opt for opt in cla if opt not in (" ", "")])
class TestSummaryVersion(Base):
    """Test the `summaryversion` executable
    """
    @pytest.mark.executabletest
    def test_summaryversion(self):
        """Test the `summaryversion` output matches pesummary.__version__
        """
        from pesummary import __version__
        import io
        from contextlib import redirect_stdout

        f = io.StringIO()
        with redirect_stdout(f):
            self.launch("summaryversion")
        out = f.getvalue()
        # index 1, not 0: Base.launch prints the argument list before running
        # the executable, so the first captured line is that debug print and
        # the version string is the second line
        assert out.split("\n")[1] == __version__
class TestSummaryGracedb(Base):
    """Test the `summarygracedb` executable with trivial examples
    """
    def setup_method(self):
        """Create the temporary working directory
        """
        os.makedirs(tmpdir, exist_ok=True)

    def teardown_method(self):
        """Delete everything written by these tests
        """
        if os.path.isdir(tmpdir):
            shutil.rmtree(tmpdir)

    @pytest.mark.executabletest
    def test_fake_event(self):
        """Test that `summarygracedb` fails when a fake event is provided
        """
        from ligo.gracedb import exceptions

        with pytest.raises(exceptions.HTTPError):
            self.launch("summarygracedb --id S111111m")

    @pytest.mark.executabletest
    def test_output(self):
        """Test the output from summarygracedb
        """
        import json

        # fetch the full set of metadata for a real superevent
        self.launch(
            f"summarygracedb --id S190412m --output {tmpdir}/output.json"
        )
        with open(f"{tmpdir}/output.json", "r") as f:
            full = json.load(f)
        assert full["superevent_id"] == "S190412m"
        assert "em_type" in full.keys()
        # restrict the query to three fields via --info
        self.launch(
            f"summarygracedb --id S190412m --output {tmpdir}/output2.json "
            "--info superevent_id far created"
        )
        with open(f"{tmpdir}/output2.json", "r") as f:
            subset = json.load(f)
        assert len(subset) == 3
        assert all(
            field in subset.keys()
            for field in ["superevent_id", "far", "created"]
        )
        # the restricted query must agree with the full one
        for field in ["superevent_id", "far", "created"]:
            assert subset[field] == full[field]
class TestSummaryDetchar(Base):
    """Test the `summarydetchar` executable with trivial examples
    """
    def setup_method(self):
        """Create the working directory and write H1/L1 strain files
        """
        from gwpy.timeseries import TimeSeries

        os.makedirs(tmpdir, exist_ok=True)
        hanford = TimeSeries(
            np.random.uniform(-1, 1, 1000), t0=101, dt=0.1, name="H1:test"
        )
        hanford.write(f"{tmpdir}/H1.gwf", format="gwf")
        livingston = TimeSeries(
            np.random.uniform(-1, 1, 1000), t0=101, dt=0.1, name="L1:test"
        )
        livingston.write(f"{tmpdir}/L1.hdf", format="hdf5")

    def teardown_method(self):
        """Delete everything written by these tests
        """
        if os.path.isdir(tmpdir):
            shutil.rmtree(tmpdir)

    @pytest.mark.executabletest
    def test_spectrogram(self):
        """Check that a spectrogram can be generated from the `summarydetchar`
        executable
        """
        from gwpy.timeseries import TimeSeries
        from matplotlib import rcParams

        rcParams["text.usetex"] = False
        self.launch(
            f"summarydetchar --gwdata H1:test:{tmpdir}/H1.gwf L1:test:{tmpdir}/L1.hdf "
            f"--webdir {tmpdir} --plot spectrogram"
        )
        for ifo in ("H1", "L1"):
            assert os.path.isfile(f"{tmpdir}/spectrogram_{ifo}.png")

    @pytest.mark.executabletest
    def test_omegascan(self):
        """Check that an omegascan can be generated from the `summarydetchar`
        executable
        """
        from gwpy.timeseries import TimeSeries

        self.launch(
            f"summarydetchar --gwdata H1:test:{tmpdir}/H1.gwf L1:test:{tmpdir}/L1.hdf "
            f"--webdir {tmpdir} --plot omegascan --gps 150 --window 0.1"
        )
        for ifo in ("H1", "L1"):
            assert os.path.isfile(f"{tmpdir}/omegascan_{ifo}.png")
class TestSummaryPublication(Base):
    """Test the `summarypublication` executable with trivial examples
    """
    def setup_method(self):
        """Create the working directory and a bilby result file
        """
        if not os.path.isdir(tmpdir):
            os.mkdir(tmpdir)
        make_result_file(bilby=True, gw=True, outdir=tmpdir)
        os.rename(f"{tmpdir}/test.json", f"{tmpdir}/bilby.json")

    def teardown_method(self):
        """Delete everything written by these tests
        """
        if os.path.isdir(tmpdir):
            shutil.rmtree(tmpdir)

    @pytest.mark.executabletest
    def test_2d_contour(self):
        """Test the 2d contour plot generation
        """
        self.launch(
            f"summarypublication --webdir {tmpdir} --samples {tmpdir}/bilby.json "
            "--labels test --parameters mass_1 mass_2 --levels 0.9 0.5 "
            "--plot 2d_contour --palette colorblind"
        )
        produced = os.path.join(tmpdir, "2d_contour_plot_mass_1_and_mass_2.png")
        assert os.path.isfile(produced)

    @pytest.mark.executabletest
    def test_violin(self):
        """Test the violin plot generation
        """
        self.launch(
            f"summarypublication --webdir {tmpdir} --samples {tmpdir}/bilby.json "
            "--labels test --parameters mass_1 --plot violin "
            "--palette colorblind"
        )
        produced = os.path.join(tmpdir, "violin_plot_mass_1.png")
        assert os.path.isfile(produced)

    @pytest.mark.executabletest
    def test_spin_disk(self):
        """Test the spin disk generation
        """
        self.launch(
            f"summarypublication --webdir {tmpdir} --samples {tmpdir}/bilby.json "
            "--labels test --parameters mass_1 --plot spin_disk "
            "--palette colorblind"
        )
        produced = os.path.join(tmpdir, "spin_disk_plot_test.png")
        assert os.path.isfile(produced)
class TestSummaryPipe(Base):
    """Test the `summarypipe` executable with trivial examples
    """
    def setup_method(self):
        """Setup the SummaryPipe class
        """
        # Fake the directory trees left behind by lalinference and bilby
        # runs; `summarypipe` infers its summarypages command from this layout
        self.dirs = [
            tmpdir, "{}/lalinference".format(tmpdir), "{}/bilby".format(tmpdir),
            "{}/lalinference/posterior_samples".format(tmpdir),
            "{}/lalinference/ROQdata".format(tmpdir),
            "{}/lalinference/engine".format(tmpdir),
            "{}/lalinference/caches".format(tmpdir),
            "{}/lalinference/log".format(tmpdir),
            "{}/bilby/data".format(tmpdir), "{}/bilby/result".format(tmpdir),
            "{}/bilby/submit".format(tmpdir),
            "{}/bilby/log_data_analysis".format(tmpdir)
        ]
        for dd in self.dirs:
            if not os.path.isdir(dd):
                os.mkdir(dd)
        # drop a lalinference-style posterior file into the expected location
        make_result_file(
            gw=False, lalinference=True,
            outdir="{}/lalinference/posterior_samples/".format(tmpdir)
        )
        os.rename(
            "{}/lalinference/posterior_samples/test.hdf5".format(tmpdir),
            "{}/lalinference/posterior_samples/posterior_HL_result.hdf5".format(tmpdir)
        )
        # and a bilby-style result file
        make_result_file(
            gw=False, bilby=True, outdir="{}/bilby/result/".format(tmpdir)
        )
        os.rename(
            "{}/bilby/result/test.json".format(tmpdir),
            "{}/bilby/result/label_result.json".format(tmpdir)
        )

    def add_config_file(self):
        # copy a reference config into each fake rundir; `summarypipe`
        # requires one to be present (see test_no_config)
        shutil.copyfile(
            os.path.join(data_dir, "config_lalinference.ini"),
            "{}/lalinference/config.ini".format(tmpdir)
        )
        shutil.copyfile(
            os.path.join(data_dir, "config_bilby.ini"),
            "{}/bilby/config.ini".format(tmpdir)
        )

    def teardown_method(self):
        """Remove the files and directories created from this class
        """
        for dd in self.dirs:
            if os.path.isdir(dd):
                shutil.rmtree(dd)

    @pytest.mark.executabletest
    def test_no_config(self):
        """Test that the code fails if there is no config file in the
        directory
        """
        for _type in ["lalinference", "bilby"]:
            command_line = "summarypipe --rundir {}/{}".format(tmpdir, _type)
            with pytest.raises(FileNotFoundError):
                self.launch(command_line)

    @pytest.mark.executabletest
    def test_no_samples(self):
        """Test that the code fails if there are no posterior samples in the
        directory
        """
        self.add_config_file()
        for _type in ["lalinference", "bilby"]:
            # remove the samples file that setup_method created so the rundir
            # looks like a run that produced no posterior
            if _type == "lalinference":
                os.remove(
                    "{}/{}/posterior_samples/posterior_HL_result.hdf5".format(
                        tmpdir, _type
                    )
                )
            else:
                os.remove(
                    "{}/{}/result/label_result.json".format(tmpdir, _type)
                )
            command_line = "summarypipe --rundir {}/{}".format(tmpdir, _type)
            with pytest.raises(FileNotFoundError):
                self.launch(command_line)

    @pytest.mark.executabletest
    def test_basic(self):
        """Test that the code runs for a trivial example
        """
        self.add_config_file()
        for _type in ["lalinference", "bilby"]:
            command_line = (
                "summarypipe --rundir {}/{} --return_string".format(tmpdir, _type)
            )
            output = self.launch(command_line)
            # the generated summarypages command must reference the config,
            # the samples file and the standard options
            assert "--config" in output
            print(output)
            print("{}/{}/config.ini".format(tmpdir, _type))
            assert "{}/{}/config.ini".format(tmpdir, _type) in output
            assert "--samples" in output
            if _type == "lalinference":
                _f = (
                    "{}/{}/posterior_samples/posterior_HL_result.hdf5".format(
                        tmpdir, _type
                    )
                )
            else:
                _f = "{}/{}/result/label_result.json".format(tmpdir, _type)
            assert _f in output
            assert "--webdir" in output
            assert "--approximant" in output
            assert "--labels" in output

    @pytest.mark.executabletest
    def test_override(self):
        """Test that when you provide an option from the command line it
        overrides the one inferred from the rundir
        """
        self.add_config_file()
        command_line = (
            "summarypipe --rundir {}/lalinference --return_string".format(tmpdir)
        )
        output = self.launch(command_line)
        command_line += " --labels hello"
        output2 = self.launch(command_line)
        assert output != output2
        # the token following '--labels' is the label in each command string
        label = output.split(" ")[output.split(" ").index("--labels") + 1]
        label2 = output2.split(" ")[output2.split(" ").index("--labels") + 1]
        assert label != label2
        assert label2 == "hello"

    @pytest.mark.executabletest
    def test_add_to_summarypages_command(self):
        """Test that when you provide an option from the command line that
        is not already in the summarypages command line, it adds it to the one
        inferred from the rundir
        """
        self.add_config_file()
        command_line = (
            "summarypipe --rundir {}/lalinference --return_string".format(tmpdir)
        )
        output = self.launch(command_line)
        command_line += " --multi_process 10 --kde_plot --cosmology Planck15_lal"
        output2 = self.launch(command_line)
        assert output != output2
        # the extra options appear only in the second command string
        assert "--multi_process 10" in output2
        assert "--cosmology Planck15_lal" in output2
        assert "--kde_plot" in output2
        assert "--multi_process 10" not in output
        assert "--cosmology Planck15_lal" not in output
        assert "--kde_plot" not in output
class TestSummaryPages(Base):
    """Test the `summarypages` executable with trivial examples
    """
    def setup_method(self):
        """Setup the TestSummaryPages class
        """
        # main scratch directory plus two numbered ones used by
        # test_summarycombine_output
        self.dirs = [tmpdir, "{}1".format(tmpdir), "{}2".format(tmpdir)]
        for dd in self.dirs:
            if not os.path.isdir(dd):
                os.mkdir(dd)
        make_result_file(outdir=tmpdir, gw=False, extension="json")
        os.rename("{}/test.json".format(tmpdir), "{}/example.json".format(tmpdir))
        make_result_file(outdir=tmpdir, gw=False, extension="hdf5")
        os.rename("{}/test.h5".format(tmpdir), "{}/example2.h5".format(tmpdir))

    def teardown_method(self):
        """Remove the files and directories created from this class
        """
        for dd in self.dirs:
            if os.path.isdir(dd):
                shutil.rmtree(dd)

    def check_output(
        self, number=1, mcmc=False, existing_plot=False, expert=False,
        gw=False
    ):
        """Check the output from the summarypages executable
        """
        assert os.path.isfile("{}/home.html".format(tmpdir))
        plots = get_list_of_plots(
            gw=gw, number=number, mcmc=mcmc, existing_plot=existing_plot,
            expert=expert, outdir=tmpdir
        )
        # print the expected/actual pairing first so a failure in the assert
        # below can be diagnosed from the captured stdout
        for i, j in zip(
            sorted(plots), sorted(glob.glob("{}/plots/*.png".format(tmpdir)))
        ):
            print(i, j)
        assert all(
            i == j for i, j in zip(
                sorted(plots), sorted(glob.glob("{}/plots/*.png".format(tmpdir)))
            )
        )
        files = get_list_of_files(
            gw=gw, number=number, existing_plot=existing_plot, outdir=tmpdir
        )
        assert all(
            i == j for i, j in zip(
                sorted(files), sorted(glob.glob("{}/html/*.html".format(tmpdir)))
            )
        )

    @pytest.mark.executabletest
    def test_descriptions(self):
        """Check that summarypages stores the correct descriptions when the
        `--descriptions` flag is provided
        """
        import json
        from pesummary.io import read
        command_line = (
            "summarypages --webdir {0} --samples {0}/example.json "
            "{0}/example.json --labels core0 core1 --nsamples 100 "
            "--disable_corner --descriptions core0:Description".format(tmpdir)
        )
        self.launch(command_line)
        opened = read("{}/samples/posterior_samples.h5".format(tmpdir))
        # only core0 was given a description on the command line
        assert opened.description["core0"] == "Description"
        assert opened.description["core1"] == "No description found"
        # descriptions may also be supplied via a json file
        with open("{}/descriptions.json".format(tmpdir), "w") as f:
            json.dump({"core0": "Testing description", "core1": "Test"}, f)
        command_line = (
            "summarypages --webdir {0} --samples {0}/example.json "
            "{0}/example.json --labels core0 core1 --nsamples 100 "
            "--disable_corner --descriptions {0}/descriptions.json".format(tmpdir)
        )
        self.launch(command_line)
        opened = read("{}/samples/posterior_samples.h5".format(tmpdir))
        assert opened.description["core0"] == "Testing description"
        assert opened.description["core1"] == "Test"

    @pytest.mark.executabletest
    def test_reweight(self):
        """Check that summarypages reweights the posterior samples if the
        `--reweight_samples` flag is provided
        """
        from pesummary.io import read
        make_result_file(gw=True, extension="json", outdir=tmpdir)
        command_line = (
            "summarypages --webdir {0} --samples {0}/test.json --gw "
            "--labels gw0 --nsamples 100 --disable_corner "
            "--reweight_samples uniform_in_comoving_volume ".format(tmpdir)
        )
        self.launch(command_line)
        self.check_output(number=1, expert=False, gw=True)
        original = read("{0}/test.json".format(tmpdir)).samples_dict
        _reweighted = read("{0}/samples/posterior_samples.h5".format(tmpdir))
        reweighted = _reweighted.samples_dict
        # reweighting can only ever discard samples
        assert original.number_of_samples >= reweighted["gw0"].number_of_samples
        # every reweighted sample must be one of the original samples
        inds = np.array([
            original.parameters.index(param) for param in
            reweighted["gw0"].parameters if param in original.parameters
        ])
        assert all(
            reweighted_sample[inds] in original.samples.T for reweighted_sample
            in reweighted["gw0"].samples.T
        )
        # the meta data must record the reweighting that was applied
        _kwargs = _reweighted.extra_kwargs[0]
        assert _kwargs["sampler"]["nsamples_before_reweighting"] == 100
        assert _kwargs["sampler"]["nsamples"] == reweighted["gw0"].number_of_samples
        assert _kwargs["meta_data"]["reweighting"] == "uniform_in_comoving_volume"

    @pytest.mark.executabletest
    def test_checkpoint(self):
        """Check that when restarting from checkpoint, the outputs are
        consistent
        """
        import time
        command_line = (
            "summarypages --webdir {0} --samples {0}/example.json "
            "--labels core0 --nsamples 100 "
            "--restart_from_checkpoint".format(tmpdir)
        )
        t0 = time.time()
        self.launch(command_line)
        t1 = time.time()
        assert os.path.isfile("{}/checkpoint/pesummary_resume.pickle".format(tmpdir))
        self.check_output(number=1, expert=False)
        t2 = time.time()
        self.launch(command_line)
        t3 = time.time()
        # NOTE(review): timing-based assumption — the resumed run is expected
        # to be faster than the initial complete run
        assert t3 - t2 < t1 - t0
        self.check_output(number=1, expert=False)
        # get timestamp of plot
        made_time = os.path.getmtime(glob.glob("{}/plots/*.png".format(tmpdir))[0])
        # the plots must not have been regenerated by the resumed run
        assert made_time < t2

    @pytest.mark.executabletest
    def test_expert(self):
        """Check that summarypages produces the expected expert diagnostic
        plots
        """
        command_line = (
            "summarypages --webdir {0} --samples {0}/example.json "
            "--labels core0 --nsamples 100".format(tmpdir)
        )
        self.launch(command_line)
        self.check_output(number=1, expert=False)
        # rerun with --enable_expert and expect the extra diagnostic plots
        command_line = (
            "summarypages --webdir {0} --samples {0}/example.json "
            "--labels core0 --nsamples 100 --enable_expert".format(tmpdir)
        )
        self.launch(command_line)
        self.check_output(number=1, expert=True)

    @pytest.mark.executabletest
    def test_prior_input(self):
        """Check that `summarypages` works when a prior file is passed from
        the command line
        """
        import importlib
        from bilby import gw

        path = gw.__path__[0]
        bilby_prior_file = os.path.join(
            path, "prior_files", "GW150914.prior"
        )

        for package in ["core", "gw"]:
            # NOTE: `gw` is rebound here from the bilby.gw module to a bool
            gw = True if package == "gw" else False
            module = importlib.import_module(
                "pesummary.{}.file.read".format(package)
            )
            make_result_file(outdir=tmpdir, gw=gw, extension="json")
            os.rename("{}/test.json".format(tmpdir), "{}/prior.json".format(tmpdir))
            for _file in ["{}/prior.json".format(tmpdir), bilby_prior_file]:
                command_line = (
                    "summarypages --webdir {} --samples {}/example.json "
                    "--labels test --prior_file {} --nsamples_for_prior "
                    "10 ".format(tmpdir, tmpdir, _file)
                )
                command_line += " --gw" if gw else ""
                self.launch(command_line)
                f = module.read("{}/samples/posterior_samples.h5".format(tmpdir))
                if _file != bilby_prior_file:
                    stored = f.priors["samples"]["test"]
                    f = module.read(_file)
                    original = f.samples_dict
                    for param in original.keys():
                        np.testing.assert_almost_equal(
                            original[param], stored[param]
                        )
                        # Non-bilby prior file will have same number or prior
                        # samples as posterior samples
                        assert len(stored[param]) == 1000
                else:
                    from bilby.core.prior import PriorDict

                    analytic = f.priors["analytic"]["test"]
                    bilby_prior = PriorDict(filename=bilby_prior_file)
                    for param, value in bilby_prior.items():
                        assert analytic[param] == str(value)
                    params = list(f.priors["samples"]["test"].keys())
                    # A bilby prior file will have 10 prior samples
                    assert len(f.priors["samples"]["test"][params[0]]) == 10

    @pytest.mark.executabletest
    def test_calibration_and_psd(self):
        """Test that the calibration and psd files are passed appropiately
        """
        from pesummary.gw.file.read import read
        from .base import make_psd, make_calibration

        make_psd(outdir=tmpdir)
        make_calibration(outdir=tmpdir)
        command_line = (
            "summarypages --webdir {0} --samples {0}/example.json "
            "--psd H1:{0}/psd.dat --calibration L1:{0}/calibration.dat "
            "--labels test --posterior_samples_filename example.h5 ".format(tmpdir)
        )
        self.launch(command_line)
        f = read("{}/samples/example.h5".format(tmpdir))
        psd = np.genfromtxt("{}/psd.dat".format(tmpdir))
        calibration = np.genfromtxt("{}/calibration.dat".format(tmpdir))
        # the stored psd/calibration data must match the input files
        np.testing.assert_almost_equal(f.psd["test"]["H1"], psd)
        np.testing.assert_almost_equal(
            f.priors["calibration"]["test"]["L1"], calibration
        )

    @pytest.mark.executabletest
    def test_strain_data(self):
        """Test that the gravitational wave data is passed appropiately
        """
        from pesummary.io import read
        from gwpy.timeseries import TimeSeries

        H1_series = TimeSeries(
            np.random.uniform(-1, 1, 1000), t0=101, dt=0.1, name="H1:test"
        )
        H1_series.write("{}/H1.gwf".format(tmpdir), format="gwf")
        L1_series = TimeSeries(
            np.random.uniform(-1, 1, 1000), t0=201, dt=0.2, name="L1:test"
        )
        L1_series.write("{}/L1.hdf".format(tmpdir), format="hdf5")
        command_line = (
            "summarypages --webdir {0} --samples {0}/example.json "
            "--gwdata H1:test:{0}/H1.gwf L1:test:{0}/L1.hdf "
            "--labels test --disable_corner --disable_interactive".format(tmpdir)
        )
        self.launch(command_line)
        f = read("{}/samples/posterior_samples.h5".format(tmpdir))
        gwdata = f.gwdata
        assert all(IFO in gwdata.detectors for IFO in ["H1", "L1"])
        # the stored strain must round-trip value, t0, dt and unit
        strain = {"H1": H1_series, "L1": L1_series}
        for IFO in gwdata.detectors:
            np.testing.assert_almost_equal(gwdata[IFO].value, strain[IFO].value)
            assert gwdata[IFO].t0 == strain[IFO].t0
            assert gwdata[IFO].dt == strain[IFO].dt
            assert gwdata[IFO].unit == strain[IFO].unit

    @pytest.mark.executabletest
    def test_gracedb(self):
        """Test that when the gracedb ID is passed from the command line it is
        correctly stored in the meta data
        """
        from pesummary.gw.file.read import read

        command_line = (
            "summarypages --webdir {0} --samples {0}/example.json "
            "--gracedb G17864 --gw --labels test".format(tmpdir)
        )
        self.launch(command_line)
        f = read("{}/samples/posterior_samples.h5".format(tmpdir))
        assert "gracedb" in f.extra_kwargs[0]["meta_data"]
        assert "G17864" == f.extra_kwargs[0]["meta_data"]["gracedb"]["id"]

    @pytest.mark.executabletest
    def test_single(self):
        """Test on a single input
        """
        command_line = (
            "summarypages --webdir {0} --samples "
            "{0}/example.json --label core0 ".format(tmpdir)
        )
        self.launch(command_line)
        self.check_output(number=1)

    @pytest.mark.executabletest
    def test_summarycombine_output(self):
        """Test on a summarycombine output
        """
        from .base import make_psd, make_calibration

        make_psd(outdir=tmpdir)
        make_calibration(outdir=tmpdir)
        # build two single-analysis metafiles, one with calibration data and
        # one with a psd
        command_line = (
            "summarycombine --webdir {0}1 --samples "
            "{0}/example.json --label gw0 "
            "--calibration L1:{0}/calibration.dat --gw".format(tmpdir)
        )
        self.launch(command_line)
        command_line = (
            "summarycombine --webdir {0}2 --samples "
            "{0}/example.json --label gw1 "
            "--psd H1:{0}/psd.dat --gw".format(tmpdir)
        )
        self.launch(command_line)
        # combine the two metafiles and run summarypages on the result
        command_line = (
            "summarycombine --webdir {0} --gw --samples "
            "{0}1/samples/posterior_samples.h5 "
            "{0}2/samples/posterior_samples.h5 ".format(tmpdir)
        )
        self.launch(command_line)
        command_line = (
            "summarypages --webdir {0} --gw --samples "
            "{0}/samples/posterior_samples.h5 ".format(tmpdir)
        )
        self.launch(command_line)

    @pytest.mark.executabletest
    def test_mcmc(self):
        """Test the `--mcmc_samples` command line argument
        """
        command_line = (
            "summarypages --webdir {0} --samples "
            "{0}/example.json {0}/example2.h5 "
            "--label core0 --mcmc_samples".format(tmpdir)
        )
        self.launch(command_line)
        self.check_output(number=1, mcmc=True)

    @pytest.mark.executabletest
    def test_kde_plot(self):
        """Test that the kde plots work on a single input and on MCMC inputs
        """
        command_line = (
            "summarypages --webdir {0} --samples "
            "{0}/example.json --label core0 --kde_plot "
            "".format(tmpdir)
        )
        self.launch(command_line)
        self.check_output(number=1)
        command_line = (
            "summarypages --webdir {0} --samples "
            "{0}/example.json {0}/example2.h5 "
            "--label core0 --mcmc_samples --kde_plot".format(tmpdir)
        )
        self.launch(command_line)
        self.check_output(number=1, mcmc=True)

    @pytest.mark.executabletest
    def test_mcmc_more_than_label(self):
        """Test that the code fails with the `--mcmc_samples` command line
        argument when multiple labels are passed.
        """
        command_line = (
            "summarypages --webdir {0} --samples "
            "{0}/example.json {0}/example2.h5 "
            "{0}/example.json {0}/example2.h5 "
            "--label core0 core1 --mcmc_samples".format(tmpdir)
        )
        with pytest.raises(InputError):
            self.launch(command_line)

    @pytest.mark.executabletest
    def test_file_format_wrong_number(self):
        """Test that the code fails with the `--file_format` command line
        argument when the number of file formats does not match the number of
        samples
        """
        # three file formats but only two sample files
        command_line = (
            "summarypages --webdir {0} --samples "
            "{0}/example.json {0}/example2.h5 "
            "--file_format hdf5 json dat".format(tmpdir)
        )
        with pytest.raises(InputError):
            self.launch(command_line)

    @pytest.mark.executabletest
    def test_add_existing_plot(self):
        """Test that an Additional page is made if existing plots are provided
        to the summarypages executable
        """
        # an empty png file is sufficient for the page-generation logic
        with open("{}/test.png".format(tmpdir), "w") as f:
            f.writelines("")
        command_line = (
            "summarypages --webdir {0} --samples "
            "{0}/example.json --label core0 --add_existing_plot "
            "core0:{0}/test.png ".format(tmpdir)
        )
        self.launch(command_line)
        self.check_output(number=1, existing_plot=True)
        command_line = (
            "summarypages --webdir {0} --samples "
            "{0}/example.json {0}/example.json --label core0 core1 "
            "--add_existing_plot core0:{0}/test.png core1:{0}/test.png "
            "".format(tmpdir)
        )
        self.launch(command_line)
        self.check_output(number=2, existing_plot=True)
class TestSummaryPagesLW(Base):
    """Test the `summarypageslw` executable
    """
    def setup_method(self):
        """Setup the SummaryPagesLW class
        """
        if not os.path.isdir(tmpdir):
            os.mkdir(tmpdir)
        make_result_file(bilby=True, gw=True, outdir=tmpdir)
        os.rename("{}/test.json".format(tmpdir), "{}/bilby.json".format(tmpdir))

    def teardown_method(self):
        """Remove the files and directories created from this class
        """
        if os.path.isdir(tmpdir):
            shutil.rmtree(tmpdir)

    def check_output(
        self, gw=False, number=1, outdir=tmpdir, parameters=None, sections=None,
        extra_gw_plots=True
    ):
        """Check the output from the summarypages executable

        Parameters
        ----------
        gw: bool, optional
            if True, compare against the GW set of plots/pages
        number: int, optional
            number of analyses stored in the webpages
        outdir: str, optional
            directory containing the generated webpages
        parameters: list, optional
            parameters that were requested; default empty list
        sections: list, optional
            sections expected in the generated pages; default empty list
        extra_gw_plots: bool, optional
            if True, expect the extra GW-specific plots/pages
        """
        # use None sentinels rather than mutable [] defaults so a default
        # list can never be shared (and mutated) across calls
        if parameters is None:
            parameters = []
        if sections is None:
            sections = []
        assert os.path.isfile("./{}/home.html".format(outdir))
        plots = get_list_of_plots(
            gw=gw, number=number, mcmc=False, existing_plot=False,
            expert=False, parameters=parameters, outdir=outdir,
            extra_gw_plots=extra_gw_plots
        )
        # the generated and expected plot sets must match exactly (both
        # inclusions checked separately)
        assert all(
            i in plots for i in glob.glob("{}/plots/*.png".format(outdir))
        )
        assert all(
            i in glob.glob("{}/plots/*.png".format(outdir)) for i in plots
        )
        files = get_list_of_files(
            gw=gw, number=number, existing_plot=False, parameters=parameters,
            sections=sections, outdir=outdir, extra_gw_pages=extra_gw_plots
        )
        assert all(
            i in files for i in glob.glob("{}/html/*.html".format(outdir))
        )
        # print membership per expected file to ease debugging on failure
        for i in files:
            print(i, i in glob.glob("{}/html/*.html".format(outdir)))
        assert all(
            i in glob.glob("{}/html/*.html".format(outdir)) for i in files
        )

    @pytest.mark.executabletest
    def test_single(self):
        """Test that the `summarypageslw` executable works as expected
        when a single result file is provided
        """
        command_line = (
            "summarypageslw --webdir {0} --samples {0}/bilby.json "
            "--labels core0 --parameters mass_1 mass_2 "
            "".format(tmpdir)
        )
        self.launch(command_line)
        self.check_output(parameters=["mass_1", "mass_2"], sections=["M-P"])
        command_line = (
            "summarypageslw --webdir {0}/gw --samples {0}/bilby.json "
            "--labels gw0 --parameters mass_1 mass_2 "
            "--gw".format(tmpdir)
        )
        self.launch(command_line)
        self.check_output(
            gw=True, parameters=["mass_1", "mass_2"], sections=["masses"],
            outdir="{}/gw".format(tmpdir), extra_gw_plots=False
        )
        command_line = command_line.replace(
            "{}/gw".format(tmpdir), "{}/gw2".format(tmpdir)
        )
        # an unknown parameter is simply dropped from the requested set
        command_line = command_line.replace("mass_1", "made_up_label")
        self.launch(command_line)
        self.check_output(
            gw=True, parameters=["mass_2"], sections=["masses"],
            outdir="{}/gw2".format(tmpdir), extra_gw_plots=False
        )
        # when no requested parameter is known the executable must fail
        with pytest.raises(Exception):
            command_line = command_line.replace("mass_2", "made_up_label2")
            self.launch(command_line)

    @pytest.mark.executabletest
    def test_double(self):
        """Test that the `summarypageslw` executable works as expected
        when multiple result files are provided
        """
        command_line = (
            "summarypageslw --webdir {0} --samples {0}/bilby.json "
            "{0}/bilby.json --labels core0 core1 --parameters mass_1 mass_2 "
            "".format(tmpdir)
        )
        self.launch(command_line)
        self.check_output(
            number=2, parameters=["mass_1", "mass_2"], sections=["M-P"]
        )

    @pytest.mark.executabletest
    def test_pesummary(self):
        """Test that the `summarypageslw` executable works as expected
        for a pesummary metafile
        """
        command_line = (
            "summarycombine --webdir {0} --samples {0}/bilby.json "
            "{0}/bilby.json --no_conversion --gw --labels core0 core1 "
            "--nsamples 100".format(tmpdir)
        )
        self.launch(command_line)
        command_line = (
            "summarypageslw --webdir {0}/lw --samples "
            "{0}/samples/posterior_samples.h5 --parameters mass_1 mass_2 "
            "".format(tmpdir)
        )
        self.launch(command_line)
        self.check_output(
            number=2, parameters=["mass_1", "mass_2"], sections=["M-P"],
            outdir="{}/lw".format(tmpdir)
        )
        command_line = command_line.replace(
            "{}/lw".format(tmpdir), "{}/lw2".format(tmpdir)
        )
        command_line = command_line.replace("mass_1", "made_up_label")
        self.launch(command_line)
        self.check_output(
            number=2, parameters=["mass_2"], sections=["M-P"],
            outdir="{}/lw2".format(tmpdir)
        )
        # combine a GW file with a core file: only the analysis containing
        # the requested parameters is kept
        make_result_file(bilby=True, gw=False, outdir=tmpdir)
        os.rename("{}/test.json".format(tmpdir), "{}/bilby2.json".format(tmpdir))
        command_line = (
            "summarycombine --webdir {0} --samples {0}/bilby.json "
            "{0}/bilby2.json --no_conversion --gw --labels core0 core1 "
            "--nsamples 100".format(tmpdir)
        )
        self.launch(command_line)
        command_line = (
            "summarypageslw --webdir {0}/lw3 --samples "
            "{0}/samples/posterior_samples.h5 --parameters mass_1 mass_2 "
            "".format(tmpdir)
        )
        self.launch(command_line)
        self.check_output(
            number=1, parameters=["mass_1", "mass_2"], sections=["M-P"],
            outdir="{}/lw3".format(tmpdir)
        )
class TestSummaryClassification(Base):
    """Test the `summaryclassification` executable
    """
    def setup_method(self):
        """Create the working directory and the input result files
        """
        if not os.path.isdir(tmpdir):
            os.mkdir(tmpdir)
        make_result_file(outdir=tmpdir, pesummary=True, gw=True, pesummary_label="test")
        os.rename(f"{tmpdir}/test.json", f"{tmpdir}/pesummary.json")
        make_result_file(outdir=tmpdir, bilby=True, gw=True)
        os.rename(f"{tmpdir}/test.json", f"{tmpdir}/bilby.json")

    def teardown_method(self):
        """Delete everything written by these tests
        """
        if os.path.isdir(tmpdir):
            shutil.rmtree(tmpdir)

    def check_output(self):
        """Check the output from the `summaryclassification` executable
        """
        import glob
        import json

        produced = glob.glob("{}/*".format(tmpdir))
        json_file = "{}/test_default_prior_pe_classification.json".format(tmpdir)
        assert json_file in produced
        assert "{}/test_default_pepredicates_bar.png".format(tmpdir) in produced
        with open(json_file, "r") as f:
            probabilities = json.load(f)
        # every source classification category must be present
        for category in ["BNS", "NSBH", "BBH", "MassGap", "HasNS", "HasRemnant"]:
            assert category in probabilities.keys()

    @pytest.mark.executabletest
    def test_result_file(self):
        """Test the `summaryclassification` executable for a random result file
        """
        self.launch(
            "summaryclassification --webdir {0} --samples "
            "{0}/bilby.json --prior default --label test".format(tmpdir)
        )
        self.check_output()

    @pytest.mark.executabletest
    def test_pesummary_file(self):
        """Test the `summaryclassification` executable for a pesummary metafile
        """
        self.launch(
            "summaryclassification --webdir {0} --samples "
            "{0}/pesummary.json --prior default".format(tmpdir)
        )
        self.check_output()
class TestSummaryTGR(Base):
    """Test the `summarytgr` executable
    """
    def setup_method(self):
        """Setup the SummaryTGR class
        """
        if not os.path.isdir(tmpdir):
            os.mkdir(tmpdir)
        # pesummary metafile containing a single analysis labelled 'test'
        make_result_file(
            outdir=tmpdir, pesummary=True, gw=True, pesummary_label="test"
        )
        os.rename(f"{tmpdir}/test.json", f"{tmpdir}/pesummary.json")
        # plain bilby result file
        make_result_file(outdir=tmpdir, bilby=True, gw=True)
        os.rename(f"{tmpdir}/test.json", f"{tmpdir}/bilby.json")

    def teardown_method(self):
        """Remove the files and directories created from this class
        """
        if os.path.isdir(tmpdir):
            shutil.rmtree(tmpdir)

    def check_output(self, diagnostic=True):
        """Check the output from the `summarytgr` executable

        Parameters
        ----------
        diagnostic: bool, optional
            if True, also check that the diagnostic plots were produced
        """
        import glob

        image_files = glob.glob(f"{tmpdir}/plots/*")
        image_base_string = tmpdir + "/plots/primary_imrct_{}.png"
        # the deviations triangle plot is always produced
        file_strings = ["deviations_triangle_plot"]
        if diagnostic:
            # these are only produced when --make_diagnostic_plots is passed
            file_strings += [
                "mass_1_mass_2", "a_1_a_2",
                "final_mass_non_evolved_final_spin_non_evolved"
            ]
        for file_string in file_strings:
            assert image_base_string.format(file_string) in image_files

    @pytest.mark.executabletest
    def test_result_file(self):
        """Test the `summarytgr` executable for a random result file
        """
        # the same file is used for both the inspiral and postinspiral analyses
        command_line = (
            f"summarytgr --webdir {tmpdir} "
            f"--samples {tmpdir}/bilby.json {tmpdir}/bilby.json "
            "--test imrct "
            "--labels inspiral postinspiral "
            "--imrct_kwargs N_bins:11 "
            "--make_diagnostic_plots "
            "--disable_pe_page_generation"
        )
        self.launch(command_line)
        self.check_output()

    @pytest.mark.executabletest
    def test_pesummary_file(self):
        """Test the `summarytgr` executable for a pesummary metafile
        """
        # labels take the form <existing label>:<analysis role>
        command_line = (
            f"summarytgr --webdir {tmpdir} --samples "
            f"{tmpdir}/pesummary.json {tmpdir}/pesummary.json --labels "
            "test:inspiral test:postinspiral --test imrct --imrct_kwargs "
            "N_bins:11 --disable_pe_page_generation"
        )
        self.launch(command_line)
        self.check_output(diagnostic=False)

    @pytest.mark.executabletest
    def test_pdfs_and_gr_quantile(self):
        """Test that the GR quantile and pdf matches the LAL implementation
        The LAL files were produced by the executable imrtgr_imr_consistency_test
        with N_bins=201 dMfbyMf_lim=3 dchifbychif_lim=3 and bbh_average_fits_precessing
        """
        from pesummary.io import read

        # fixed seeds so the generated samples (and hence the pdf/quantile)
        # are reproducible against the stored LAL reference data
        make_result_file(outdir="./", extension="dat", gw=True, random_seed=123456789)
        os.rename("./test.dat", f"{tmpdir}/inspiral.dat")
        make_result_file(outdir="./", extension="dat", gw=True, random_seed=987654321)
        os.rename("./test.dat", f"{tmpdir}/postinspiral.dat")
        # settings chosen to match those used to produce the LAL reference file
        command_line = (
            f"summarytgr --webdir {tmpdir} "
            f"--samples {tmpdir}/inspiral.dat {tmpdir}/postinspiral.dat "
            "--test imrct "
            "--labels inspiral postinspiral "
            "--imrct_kwargs N_bins:201 final_mass_deviation_lim:3 final_spin_deviation_lim:3 "
            "--disable_pe_page_generation"
        )
        self.launch(command_line)
        f = read(f"{tmpdir}/samples/tgr_samples.h5")
        pesummary_quantile = f.extra_kwargs["primary"]["GR Quantile (%)"]
        probdict = f.imrct_deviation["final_mass_final_spin_deviations"]
        lal_pdf = np.loadtxt(os.path.join(data_dir, "lal_pdf_for_summarytgr.dat.gz"))
        # convert the stored probability masses to a density for comparison
        pesummary_pdf = probdict.probs / probdict.dx / probdict.dy

        np.testing.assert_almost_equal(pesummary_quantile, 3.276372814744687306)
        np.testing.assert_almost_equal(pesummary_pdf, lal_pdf)
class TestSummaryClean(Base):
    """Test the `summaryclean` executable
    """
    def setup_method(self):
        """Create the temporary working directory
        """
        if not os.path.isdir(tmpdir):
            os.mkdir(tmpdir)

    def teardown_method(self):
        """Remove the files and directories created from this class
        """
        if os.path.isdir(tmpdir):
            shutil.rmtree(tmpdir)

    @pytest.mark.executabletest
    def test_clean(self):
        """Test the `summaryclean` executable
        """
        import h5py

        # two valid samples plus one unphysical mass ratio (-1.5) which
        # `summaryclean` is expected to remove
        structured = np.array(
            [(0.5,), (0.5,), (-1.5,)], dtype=[("mass_ratio", 'float64')]
        )
        with h5py.File(f"{tmpdir}/test.hdf5", "w") as f:
            nest = f.create_group("lalinference").create_group(
                "lalinference_nest"
            )
            nest.create_dataset("posterior_samples", data=structured)
        command_line = (
            "summaryclean --webdir {0} --samples {0}/test.hdf5 "
            "--file_format dat --labels test".format(tmpdir)
        )
        self.launch(command_line)
        self.check_output()

    def check_output(self):
        """Check the output from the `summaryclean` executable
        """
        from pesummary.gw.file.read import read

        cleaned = read(f"{tmpdir}/pesummary_test.dat").samples_dict["mass_ratio"]
        print(cleaned)
        # only the two physical samples should survive the cleaning
        assert len(cleaned) == 2
        assert all(sample == 0.5 for sample in cleaned)
class _SummaryCombine_Metafiles(Base):
    """Test the `summarycombine_metafile` executable
    """
    @pytest.mark.executabletest
    def test_combine(self, gw=False):
        """Test the executable for 2 metafiles

        Parameters
        ----------
        gw: bool, optional
            if True, pass the `--gw` flag to the `summarycombine` executable
        """
        make_result_file(outdir=tmpdir, pesummary=True, pesummary_label="label2")
        os.rename("{}/test.json".format(tmpdir), "{}/test2.json".format(tmpdir))
        make_result_file(outdir=tmpdir, pesummary=True)
        command_line = (
            "summarycombine --webdir {0} "
            "--samples {0}/test.json {0}/test2.json "
            "--save_to_json".format(tmpdir)
        )
        if gw:
            command_line += " --gw"
        self.launch(command_line)

    def check_output(self, gw=False):
        """Check that the combined metafile contains every label and every
        sample from the individual input metafiles

        Parameters
        ----------
        gw: bool, optional
            if True, read the files with the GW specific reader
        """
        if gw:
            from pesummary.gw.file.read import read
        else:
            from pesummary.core.file.read import read

        assert os.path.isfile("{}/samples/posterior_samples.json".format(tmpdir))
        combined = read("{}/samples/posterior_samples.json".format(tmpdir))
        for f in ["{}/test.json".format(tmpdir), "{}/test2.json".format(tmpdir)]:
            data = read(f)
            labels = data.labels
            assert all(i in combined.labels for i in labels)
            # BUGFIX: the label was previously referenced through the loop
            # variable of an already-exhausted generator expression (a
            # NameError in Python 3) and the inner loop iterated over
            # `range(<sample array>)` (a TypeError). Bind each label
            # explicitly and iterate over the number of samples instead
            for label in labels:
                assert all(
                    all(
                        data.samples_dict[j][num]
                        == combined.samples_dict[label][j][num]
                        for num in range(len(data.samples_dict[j]))
                    ) for j in data.samples_dict.keys()
                )
class TestCoreSummaryCombine_Metafiles(_SummaryCombine_Metafiles):
    """Test the `summarycombine_metafile` executable
    """
    def setup_method(self):
        """Create the pesummary metafile used by the test
        """
        if not os.path.isdir(tmpdir):
            os.mkdir(tmpdir)
        make_result_file(outdir=tmpdir, pesummary=True)

    def teardown_method(self):
        """Remove the files and directories created from this class
        """
        if os.path.isdir(tmpdir):
            shutil.rmtree(tmpdir)

    @pytest.mark.executabletest
    def test_combine(self):
        """Test the executable for 2 metafiles
        """
        super().test_combine(gw=False)

    def check_output(self):
        """Check the output with the core (non-GW) file reader
        """
        super().check_output(gw=False)
class TestGWSummaryCombine_Metafiles(_SummaryCombine_Metafiles):
    """Test the `summarycombine_metafile` executable
    """
    def setup_method(self):
        """Create the pesummary metafile used by the test
        """
        if not os.path.isdir(tmpdir):
            os.mkdir(tmpdir)
        make_result_file(outdir=tmpdir, pesummary=True)

    def teardown_method(self):
        """Remove the files and directories created from this class
        """
        if os.path.isdir(tmpdir):
            shutil.rmtree(tmpdir)

    @pytest.mark.executabletest
    def test_combine(self):
        """Test the executable for 2 metafiles
        """
        super().test_combine(gw=True)

    def check_output(self, gw=True):
        """Check the output with the GW specific file reader
        """
        super().check_output(gw=True)
class TestSummaryCombine(Base):
    """Test the `summarycombine` executable
    """
    def setup_method(self):
        """Setup the SummaryCombine class
        """
        self.dirs = [tmpdir]
        for dd in self.dirs:
            if not os.path.isdir(dd):
                os.mkdir(dd)

    def teardown_method(self):
        """Remove the files and directories created from this class
        """
        for dd in self.dirs:
            if os.path.isdir(dd):
                shutil.rmtree(dd)

    @pytest.mark.executabletest
    def test_disable_prior_sampling(self):
        """Test that the code skips prior sampling when the appropriate flag
        is provided to the `summarypages` executable
        """
        from pesummary.io import read

        make_result_file(outdir=tmpdir, bilby=True, gw=False)
        os.rename("{}/test.json".format(tmpdir), "{}/bilby.json".format(tmpdir))
        # by default prior samples are drawn and stored in the metafile
        command_line = (
            "summarycombine --webdir {0} --samples {0}/bilby.json "
            "--labels core0".format(tmpdir)
        )
        self.launch(command_line)
        f = read("{}/samples/posterior_samples.h5".format(tmpdir))
        assert len(f.priors["samples"]["core0"])

        # with --disable_prior_sampling no prior samples should be stored
        command_line = (
            "summarycombine --webdir {0} --samples {0}/bilby.json "
            "--disable_prior_sampling --labels core0".format(tmpdir)
        )
        self.launch(command_line)
        f = read("{}/samples/posterior_samples.h5".format(tmpdir))
        assert not len(f.priors["samples"]["core0"])

    @pytest.mark.executabletest
    def test_external_hdf5_links(self):
        """Test that separate hdf5 files are made when the
        `--external_hdf5_links` command line is passed
        """
        from pesummary.gw.file.read import read
        from .base import make_psd, make_calibration

        make_result_file(outdir=tmpdir, gw=True, extension="json")
        os.rename("{}/test.json".format(tmpdir), "{}/example.json".format(tmpdir))
        make_psd(outdir=tmpdir)
        make_calibration(outdir=tmpdir)
        command_line = (
            "summarycombine --webdir {0} --samples "
            "{0}/example.json --label gw0 --external_hdf5_links --gw "
            "--psd H1:{0}/psd.dat --calibration L1:{0}/calibration.dat "
            "--no_conversion".format(tmpdir)
        )
        self.launch(command_line)
        # the main metafile plus one external file per analysis
        assert os.path.isfile(
            os.path.join(tmpdir, "samples", "posterior_samples.h5")
        )
        assert os.path.isfile(
            os.path.join(tmpdir, "samples", "_gw0.h5")
        )
        # the linked file must hold the same samples/psd/calibration data as
        # both the main metafile and the original result file
        f = read("{}/samples/posterior_samples.h5".format(tmpdir))
        g = read("{}/example.json".format(tmpdir))
        h = read("{}/samples/_gw0.h5".format(tmpdir))
        np.testing.assert_almost_equal(f.samples[0], g.samples)
        np.testing.assert_almost_equal(f.samples[0], h.samples[0])
        np.testing.assert_almost_equal(f.psd["gw0"]["H1"], h.psd["gw0"]["H1"])
        np.testing.assert_almost_equal(
            f.priors["calibration"]["gw0"]["L1"],
            h.priors["calibration"]["gw0"]["L1"]
        )

    @pytest.mark.executabletest
    def test_compression(self):
        """Test that the metafile is reduced in size when the datasets are
        compressed with maximum compression level
        """
        from pesummary.gw.file.read import read
        from .base import make_psd, make_calibration

        make_result_file(outdir=tmpdir, gw=True, extension="json")
        os.rename("{}/test.json".format(tmpdir), "{}/example.json".format(tmpdir))
        make_psd(outdir=tmpdir)
        make_calibration(outdir=tmpdir)
        command_line = (
            "summarycombine --webdir {0} --samples "
            "{0}/example.json --label gw0 --no_conversion --gw "
            "--psd H1:{0}/psd.dat --calibration L1:{0}/calibration.dat ".format(
                tmpdir
            )
        )
        self.launch(command_line)
        original_size = os.stat("{}/samples/posterior_samples.h5".format(tmpdir)).st_size
        command_line = (
            "summarycombine --webdir {0} --samples "
            "{0}/example.json --label gw0 --no_conversion --gw "
            "--psd H1:{0}/psd.dat --calibration L1:{0}/calibration.dat "
            "--hdf5_compression 9 --posterior_samples_filename "
            "posterior_samples2.h5".format(tmpdir)
        )
        self.launch(command_line)
        compressed_size = os.stat("{}/samples/posterior_samples2.h5".format(tmpdir)).st_size
        assert compressed_size < original_size

        # compression must not change the stored samples
        f = read("{}/samples/posterior_samples.h5".format(tmpdir))
        g = read("{}/samples/posterior_samples2.h5".format(tmpdir))
        posterior_samples = f.samples[0]
        posterior_samples2 = g.samples[0]
        np.testing.assert_almost_equal(posterior_samples, posterior_samples2)

    @pytest.mark.executabletest
    def test_seed(self):
        """Test that the samples stored in the metafile are identical for two
        runs if the random seed is the same
        """
        from pesummary.gw.file.read import read

        make_result_file(outdir=tmpdir, gw=True, extension="json")
        os.rename("{}/test.json".format(tmpdir), "{}/example.json".format(tmpdir))
        command_line = (
            "summarycombine --webdir {0} --samples "
            "{0}/example.json --label gw0 --no_conversion --gw "
            "--nsamples 10 --seed 1000".format(tmpdir)
        )
        self.launch(command_line)
        original = read("{}/samples/posterior_samples.h5".format(tmpdir))
        command_line = (
            "summarycombine --webdir {0} --samples "
            "{0}/example.json --label gw0 --no_conversion --gw "
            "--nsamples 10 --seed 2000".format(tmpdir)
        )
        self.launch(command_line)
        new = read("{}/samples/posterior_samples.h5".format(tmpdir))
        # different seeds must downsample to different sets of samples.
        # BUGFIX: the previous try/except also swallowed the deliberately
        # raised AssertionError("Failed"), so this check could never fail
        with pytest.raises(AssertionError):
            np.testing.assert_almost_equal(
                original.samples[0], new.samples[0]
            )

        # identical seeds must downsample to identical sets of samples
        command_line = (
            "summarycombine --webdir {0} --samples "
            "{0}/example.json --label gw0 --no_conversion --gw "
            "--nsamples 10 --seed 1000".format(tmpdir)
        )
        self.launch(command_line)
        original = read("{}/samples/posterior_samples.h5".format(tmpdir))
        command_line = (
            "summarycombine --webdir {0} --samples "
            "{0}/example.json --label gw0 --no_conversion --gw "
            "--nsamples 10 --seed 1000".format(tmpdir)
        )
        self.launch(command_line)
        new = read("{}/samples/posterior_samples.h5".format(tmpdir))
        np.testing.assert_almost_equal(
            original.samples[0], new.samples[0]
        )

    @pytest.mark.executabletest
    def test_preferred(self):
        """Test that the preferred analysis is correctly stored in the metafile
        """
        from pesummary.io import read
        make_result_file(gw=True, extension="json", outdir=tmpdir)
        make_result_file(gw=True, extension="hdf5", outdir=tmpdir)
        # without --preferred no preferred analysis is stored
        command_line = (
            "summarycombine --webdir {0} --samples "
            "{0}/test.json {0}/test.h5 --label gw0 gw1 --no_conversion "
            "--gw --nsamples 10".format(tmpdir)
        )
        self.launch(command_line)
        f = read("{}/samples/posterior_samples.h5".format(tmpdir))
        assert f.preferred is None
        # a valid label passed to --preferred is stored
        command_line = (
            "summarycombine --webdir {0} --samples "
            "{0}/test.json {0}/test.h5 --label gw0 gw1 --no_conversion "
            "--gw --nsamples 10 --preferred gw1".format(tmpdir)
        )
        self.launch(command_line)
        f = read("{}/samples/posterior_samples.h5".format(tmpdir))
        assert f.preferred == "gw1"
        # an unknown label is ignored and no preferred analysis is stored
        command_line = (
            "summarycombine --webdir {0} --samples "
            "{0}/test.json {0}/test.h5 --label gw0 gw1 --no_conversion "
            "--gw --nsamples 10 --preferred gw2".format(tmpdir)
        )
        self.launch(command_line)
        f = read("{}/samples/posterior_samples.h5".format(tmpdir))
        assert f.preferred is None
class TestSummaryReview(Base):
    """Test the `summaryreview` executable
    """
    def setup_method(self):
        """Create the lalinference result file used by the test
        """
        if not os.path.isdir(tmpdir):
            os.mkdir(tmpdir)
        make_result_file(outdir=tmpdir, lalinference=True)

    def teardown_method(self):
        """Remove the files and directories created from this class
        """
        if os.path.isdir(tmpdir):
            shutil.rmtree(tmpdir)

    @pytest.mark.executabletest
    def test_review(self):
        """Test the `summaryreview` script for a `lalinference` result file
        """
        command_line = (
            f"summaryreview --webdir {tmpdir} --samples {tmpdir}/test.hdf5 "
            "--test core_plots"
        )
        self.launch(command_line)
class TestSummarySplit(Base):
    """Test the `summarysplit` executable
    """
    def setup_method(self):
        """Setup the SummarySplit class
        """
        if not os.path.isdir(tmpdir):
            os.mkdir(tmpdir)
        # single-analysis file with the default number of samples
        make_result_file(outdir=tmpdir, gw=False, extension="json")
        # second analysis with a different number of samples
        make_result_file(outdir=tmpdir, gw=False, extension="hdf5", n_samples=500)

    def teardown_method(self):
        """Remove the files and directories created from this class
        """
        if os.path.isdir(tmpdir):
            shutil.rmtree(tmpdir)

    @pytest.mark.executabletest
    def test_split_single_analysis(self):
        """Test that a file containing a single analysis is successfully split
        into N_samples result files
        """
        from pesummary.io import read
        command_line = (
            f"summarysplit --samples {tmpdir}/test.json --file_format json "
            f"--outdir {tmpdir}/split"
        )
        self.launch(command_line)
        original = read(f"{tmpdir}/test.json").samples_dict
        files = glob.glob(f"{tmpdir}/split/*.json")
        # without --N_files one file is written per posterior sample
        assert len(files) == original.number_of_samples
        for num, f in enumerate(files):
            g = read(f).samples_dict
            assert g.number_of_samples == 1
            # recover the original sample index from the trailing
            # '_<idx>.json' part of the filename
            idx = int(f.split("/")[-1].split("_")[-1].split(".")[0])
            for param in g.keys():
                assert g[param] == original[param][idx]
        # recombining all of the split files should reproduce the original
        # set of samples
        command_line = (
            "summarycombine_posteriors --use_all --samples {} "
            f"--outdir {tmpdir} --filename combined_split.dat "
            "--file_format dat --labels {}"
        ).format(
            " ".join(files), " ".join(
                np.arange(original.number_of_samples).astype(str)
            )
        )
        self.launch(command_line)
        combined = read(f"{tmpdir}/combined_split.dat").samples_dict
        assert all(param in original.keys() for param in combined.keys())
        for param in original.keys():
            assert all(sample in combined[param] for sample in original[param])
            assert all(sample in original[param] for sample in combined[param])

    @pytest.mark.executabletest
    def test_split_single_analysis_specific_N_files(self):
        """Test that a file containing a single analysis is successfully split
        into 10 result files
        """
        from pesummary.io import read
        command_line = (
            f"summarysplit --samples {tmpdir}/test.json --file_format json "
            f"--outdir {tmpdir}/split --N_files 10"
        )
        self.launch(command_line)
        original = read(f"{tmpdir}/test.json").samples_dict
        files = glob.glob(f"{tmpdir}/split/*.json")
        assert len(files) == 10
        # every sample in every split file must come from the original set
        for num, f in enumerate(files):
            g = read(f).samples_dict
            for param in g.keys():
                assert all(sample in original[param] for sample in g[param])

    @pytest.mark.executabletest
    def test_split_multi_analysis(self):
        """Test that a file containing multiple analyses is successfully split
        into N_samples result files
        """
        from pesummary.io import read
        command_line = (
            f"summarycombine --webdir {tmpdir} --samples {tmpdir}/test.json "
            f"{tmpdir}/test.h5 --labels one two"
        )
        self.launch(command_line)
        command_line = (
            f"summarysplit --samples {tmpdir}/samples/posterior_samples.h5 "
            f"--file_format hdf5 --outdir {tmpdir}/split"
        )
        self.launch(command_line)
        # one subdirectory is created per analysis label
        assert os.path.isdir(f"{tmpdir}/split/one")
        assert os.path.isdir(f"{tmpdir}/split/two")
        zipped = zip(["one", "two"], [f"{tmpdir}/test.json", f"{tmpdir}/test.h5"])
        for analysis, f in zipped:
            original = read(f).samples_dict
            files = glob.glob(f"{tmpdir}/split/{analysis}/*.hdf5")
            assert len(files) == original.number_of_samples
            for num, g in enumerate(files):
                h = read(g).samples_dict
                assert h.number_of_samples == 1
                # recover the original sample index from the filename
                idx = int(g.split("/")[-1].split("_")[-1].split(".")[0])
                for param in h.keys():
                    assert h[param] == original[param][idx]
class TestSummaryExtract(Base):
    """Test the `summaryextract` executable
    """
    def setup_method(self):
        """Create the two result files combined by the test
        """
        if not os.path.isdir(tmpdir):
            os.mkdir(tmpdir)
        make_result_file(outdir=tmpdir, gw=False, extension="json")
        os.rename(f"{tmpdir}/test.json", f"{tmpdir}/example.json")
        make_result_file(outdir=tmpdir, gw=False, extension="hdf5")
        os.rename(f"{tmpdir}/test.h5", f"{tmpdir}/example2.h5")

    def teardown_method(self):
        """Remove the files and directories created from this class
        """
        if os.path.isdir(tmpdir):
            shutil.rmtree(tmpdir)

    @pytest.mark.executabletest
    def test_extract(self):
        """Test that a set if posterior samples are correctly extracted
        """
        from pesummary.io import read

        metafile = f"{tmpdir}/samples/posterior_samples.h5"
        command_line = (
            f"summarycombine --samples {tmpdir}/example.json {tmpdir}/example2.h5 "
            f"--labels one two --webdir {tmpdir}"
        )
        self.launch(command_line)
        # extract the analysis labelled 'one' to a dat file
        command_line = (
            f"summaryextract --outdir {tmpdir} --filename one.dat --file_format dat "
            f"--samples {metafile} --label one"
        )
        self.launch(command_line)
        assert os.path.isfile(f"{tmpdir}/one.dat")
        original = read(f"{tmpdir}/example.json").samples_dict
        dat_samples = read(f"{tmpdir}/one.dat").samples_dict
        assert all(param in dat_samples.keys() for param in original.keys())
        np.testing.assert_almost_equal(dat_samples.samples, original.samples)
        # extract the same analysis to a single-analysis pesummary metafile;
        # the analysis is stored under the default label 'dataset'
        command_line = (
            f"summaryextract --outdir {tmpdir} --filename one.h5 --label one "
            "--file_format pesummary "
            f"--samples {metafile} "
        )
        self.launch(command_line)
        assert os.path.isfile(f"{tmpdir}/one.h5")
        meta_samples = read(f"{tmpdir}/one.h5").samples_dict
        assert "dataset" in meta_samples.keys()
        assert all(param in meta_samples["dataset"].keys() for param in original.keys())
        np.testing.assert_almost_equal(meta_samples["dataset"].samples, original.samples)
class TestSummaryCombine_Posteriors(Base):
    """Test the `summarycombine_posteriors` executable
    """
    def setup_method(self):
        """Setup the SummaryCombine_Posteriors class
        """
        if not os.path.isdir(tmpdir):
            os.mkdir(tmpdir)
        # three GW result files in different formats
        make_result_file(outdir=tmpdir, gw=True, extension="json")
        os.rename(f"{tmpdir}/test.json", f"{tmpdir}/example.json")
        make_result_file(outdir=tmpdir, gw=True, extension="hdf5")
        os.rename(f"{tmpdir}/test.h5", f"{tmpdir}/example2.h5")
        make_result_file(outdir=tmpdir, gw=True, extension="dat")
        os.rename(f"{tmpdir}/test.dat", f"{tmpdir}/example3.dat")

    def teardown_method(self):
        """Remove the files and directories created from this class
        """
        if os.path.isdir(tmpdir):
            shutil.rmtree(tmpdir)

    @pytest.mark.executabletest
    def test_combine(self):
        """Test that the two posteriors are combined
        """
        from pesummary.io import read
        command_line = (
            f"summarycombine_posteriors --outdir {tmpdir} --filename test.dat "
            f"--file_format dat --samples {tmpdir}/example.json {tmpdir}/example2.h5 "
            "--labels one two --weights 0.5 0.5 --seed 12345"
        )
        self.launch(command_line)
        assert os.path.isfile(f"{tmpdir}/test.dat")
        combined = read(f"{tmpdir}/test.dat").samples_dict
        one = read(f"{tmpdir}/example.json").samples_dict
        two = read(f"{tmpdir}/example2.h5").samples_dict
        nsamples = combined.number_of_samples
        half = int(nsamples / 2.)
        # with equal weights the first half of the combined samples is drawn
        # from 'one' and the second half from 'two'
        for param in combined.keys():
            assert all(ss in one[param] for ss in combined[param][:half])
            assert all(ss in two[param] for ss in combined[param][half:])

    @pytest.mark.executabletest
    def test_combine_metafile_failures(self):
        """Test that errors are raised when incorrect labels are passed when "
        trying to combine posteriors from a single metafile and when trying
        to combine posteriors from multiple metafiles
        """
        command_line = (
            f"summarycombine --samples {tmpdir}/example.json {tmpdir}/example2.h5 "
            f"{tmpdir}/example3.dat --labels one two three --webdir {tmpdir} "
            "--no_conversion"
        )
        self.launch(command_line)
        # label 'four' does not exist in the metafile
        with pytest.raises(Exception):
            command_line = (
                f"summarycombine_posteriors --outdir {tmpdir} --filename test.dat "
                f"--file_format dat --samples {tmpdir}/samples/posterior_samples.h5 "
                "--labels one four --weights 0.5 0.5 --seed 12345"
            )
            self.launch(command_line)
        # more than one metafile is not allowed
        with pytest.raises(Exception):
            command_line = (
                f"summarycombine_posteriors --outdir {tmpdir} --filename test.dat "
                f"--file_format dat --samples {tmpdir}/samples/posterior_samples.h5 "
                f"{tmpdir}/samples/posterior_samples.h5 --labels one two "
                "--weights 0.5 0.5 --seed 12345"
            )
            self.launch(command_line)
        # mixing a metafile with an ordinary result file is not allowed
        with pytest.raises(Exception):
            command_line = (
                f"summarycombine_posteriors --outdir {tmpdir} --filename test.dat "
                f"--file_format dat --samples {tmpdir}/samples/posterior_samples.h5 "
                f"{tmpdir}/example3.dat --labels one two --weights 0.5 0.5 --seed 12345"
            )
            self.launch(command_line)

    @pytest.mark.executabletest
    def test_combine_metafile(self):
        """Test that the two posteriors are combined when a single metafile
        is provided
        """
        from pesummary.io import read
        command_line = (
            f"summarycombine --samples {tmpdir}/example.json {tmpdir}/example2.h5 "
            f"{tmpdir}/example3.dat --labels one two three --webdir {tmpdir} "
            "--no_conversion"
        )
        self.launch(command_line)
        command_line = (
            f"summarycombine_posteriors --outdir {tmpdir} --filename test.dat "
            f"--file_format dat --samples {tmpdir}/samples/posterior_samples.h5 "
            "--labels one two --weights 0.5 0.5 --seed 12345"
        )
        self.launch(command_line)
        assert os.path.isfile(f"{tmpdir}/test.dat")
        combined = read(f"{tmpdir}/test.dat").samples_dict
        one = read(f"{tmpdir}/example.json").samples_dict
        two = read(f"{tmpdir}/example2.h5").samples_dict
        nsamples = combined.number_of_samples
        half = int(nsamples / 2.)
        # with equal weights the first half of the combined samples is drawn
        # from 'one' and the second half from 'two'
        for param in combined.keys():
            assert all(ss in one[param] for ss in combined[param][:half])
            assert all(ss in two[param] for ss in combined[param][half:])

        # test that you add the samples to the original file
        command_line = (
            f"summarycombine_posteriors --outdir {tmpdir} --filename test.h5 "
            f"--file_format dat --samples {tmpdir}/samples/posterior_samples.h5 "
            "--labels one two --weights 0.5 0.5 --seed 12345 --add_to_existing"
        )
        self.launch(command_line)
        assert os.path.isfile(f"{tmpdir}/test.h5")
        combined = read(f"{tmpdir}/test.h5")
        combined_samples = combined.samples_dict
        # the combined analysis is stored under a new label alongside the
        # original analyses
        assert "one_two_combined" in combined.labels
        assert "one_two_combined" in combined_samples.keys()
        combined_samples = combined_samples["one_two_combined"]
        for param in combined_samples.keys():
            assert all(ss in one[param] for ss in combined_samples[param][:half])
            assert all(ss in two[param] for ss in combined_samples[param][half:])
        # check that summarypages works fine on output
        command_line = (
            f"summarypages --webdir {tmpdir}/combined "
            f" --no_conversion --samples {tmpdir}/test.h5 "
            "--disable_corner --disable_interactive --gw"
        )
        self.launch(command_line)
        assert os.path.isfile(f"{tmpdir}/combined/samples/posterior_samples.h5")
        output = read(f"{tmpdir}/combined/samples/posterior_samples.h5")
        assert "one_two_combined" in output.labels
1737class TestSummaryModify(Base):
1738 """Test the `summarymodify` executable
1739 """
1740 def setup_method(self):
1741 """Setup the SummaryModify class
1742 """
1743 if not os.path.isdir(tmpdir):
1744 os.mkdir(tmpdir)
1745 make_result_file(
1746 pesummary=True, pesummary_label="replace", extension="hdf5",
1747 outdir=tmpdir
1748 )
1750 def teardown_method(self):
1751 """Remove the files and directories created from this class
1752 """
1753 if os.path.isdir(tmpdir):
1754 shutil.rmtree(tmpdir)
1756 @pytest.mark.executabletest
1757 def test_preferred(self):
1758 """Test that the preferred run is correctly specified in the meta file
1759 """
1760 from pesummary.io import read
1761 make_result_file(extension="json", bilby=True, gw=True, outdir=tmpdir)
1762 make_result_file(extension="dat", gw=True, outdir=tmpdir)
1763 command_line = (
1764 "summarycombine --webdir {0} --samples {0}/test.json "
1765 "{0}/test.dat --no_conversion --gw --labels one two "
1766 "--nsamples 100".format(
1767 tmpdir
1768 )
1769 )
1770 self.launch(command_line)
1771 f = read("{}/samples/posterior_samples.h5".format(tmpdir))
1772 assert f.preferred is None
1773 command_line = (
1774 "summarymodify --samples {0}/samples/posterior_samples.h5 "
1775 "--webdir {0} --preferred two".format(tmpdir)
1776 )
1777 self.launch(command_line)
1778 f = read("{0}/modified_posterior_samples.h5".format(tmpdir))
1779 assert f.preferred == "two"
1781 @pytest.mark.executabletest
1782 def test_descriptions(self):
1783 """Test that the descriptions are correctly replaced in the meta file
1784 """
1785 import json
1786 import h5py
1788 command_line = (
1789 'summarymodify --webdir {0} --samples {0}/test.h5 '
1790 '--descriptions replace:TestingSummarymodify'.format(tmpdir)
1791 )
1792 self.launch(command_line)
1793 modified_data = h5py.File(
1794 "{}/modified_posterior_samples.h5".format(tmpdir), "r"
1795 )
1796 original_data = h5py.File("{}/test.h5".format(tmpdir), "r")
1797 data = h5py.File("{}/test.h5".format(tmpdir), "r")
1798 if "description" in original_data["replace"].keys():
1799 assert original_data["replace"]["description"][0] != b'TestingSummarymodify'
1800 assert modified_data["replace"]["description"][0] == b'TestingSummarymodify'
1801 modified_data.close()
1802 original_data.close()
1804 with open("{}/descriptions.json".format(tmpdir), "w") as f:
1805 json.dump({"replace": "NewDescription"}, f)
1807 command_line = (
1808 'summarymodify --webdir {0} --samples {0}/test.h5 '
1809 '--descriptions {0}/descriptions.json'.format(tmpdir)
1810 )
1811 self.launch(command_line)
1812 modified_data = h5py.File(
1813 "{}/modified_posterior_samples.h5".format(tmpdir), "r"
1814 )
1815 assert modified_data["replace"]["description"][0] == b'NewDescription'
1816 modified_data.close()
    @pytest.mark.executabletest
    def test_modify_config(self):
        """Test that the config file is correctly replaced in the meta file
        """
        import configparser
        import h5py
        user = getuser()
        config = configparser.ConfigParser()
        # preserve the case of option names when reading the ini file
        config.optionxform = str
        config.read(data_dir + "/config_lalinference.ini")
        # NOTE: dict(config._sections) holds references to the parser's
        # internal section dicts, so mutating config_dictionary also mutates
        # `config` itself — relied upon when `config` is written out below
        config_dictionary = dict(config._sections)
        config_dictionary["paths"]["webdir"] = (
            "./{}/webdir".format(user)
        )
        # store the modified config inside the pesummary metafile
        make_result_file(
            pesummary=True, pesummary_label="replace", extension="hdf5",
            config=config_dictionary, outdir=tmpdir
        )
        f = h5py.File("{}/test.h5".format(tmpdir), "r")
        assert f["replace"]["config_file"]["paths"]["webdir"][0] == (
            bytes("./{}/webdir".format(user), "utf-8")
        )
        f.close()
        # write a second config with a different webdir and ask
        # `summarymodify` to swap it into the metafile
        config.read(data_dir + "/config_lalinference.ini")
        config_dictionary = dict(config._sections)
        config_dictionary["paths"]["webdir"] = "./replace/webdir"
        with open('{}/replace_config.ini'.format(tmpdir), 'w') as configfile:
            config.write(configfile)
        command_line = (
            "summarymodify --webdir {0} --samples {0}/test.h5 "
            "--config replace:{0}/replace_config.ini".format(tmpdir)
        )
        self.launch(command_line)
        f = h5py.File("{}/modified_posterior_samples.h5".format(tmpdir), "r")
        # the old webdir must be gone and the replacement stored
        assert f["replace"]["config_file"]["paths"]["webdir"][0] != (
            bytes("./{}/webdir".format(user), "utf-8")
        )
        assert f["replace"]["config_file"]["paths"]["webdir"][0] == (
            bytes("./replace/webdir", "utf-8")
        )
        f.close()
1860 @pytest.mark.executabletest
1861 def test_modify_kwargs_replace(self):
1862 """Test that kwargs are correctly replaced in the meta file
1863 """
1864 import h5py
1866 command_line = (
1867 "summarymodify --webdir {0} --samples {0}/test.h5 "
1868 "--delimiter / --kwargs replace/log_evidence:1000".format(
1869 tmpdir
1870 )
1871 )
1872 self.launch(command_line)
1873 modified_data = h5py.File(
1874 "{}/modified_posterior_samples.h5".format(tmpdir), "r"
1875 )
1876 original_data = h5py.File("{}/test.h5".format(tmpdir), "r")
1877 data = h5py.File("{}/test.h5".format(tmpdir), "r")
1878 assert original_data["replace"]["meta_data"]["sampler"]["log_evidence"][0] != b'1000'
1879 assert modified_data["replace"]["meta_data"]["sampler"]["log_evidence"][0] == b'1000'
1880 modified_data.close()
1881 original_data.close()
1883 @pytest.mark.executabletest
1884 def test_modify_kwargs_append(self):
1885 """Test that kwargs are correctly added to the result file
1886 """
1887 import h5py
1889 original_data = h5py.File("{}/test.h5".format(tmpdir), "r")
1890 assert "other" not in original_data["replace"]["meta_data"].keys()
1891 original_data.close()
1892 command_line = (
1893 "summarymodify --webdir {0} --samples {0}/test.h5 "
1894 "--delimiter / --kwargs replace/test:10 "
1895 "--overwrite".format(tmpdir)
1896 )
1897 self.launch(command_line)
1898 modified_data = h5py.File("{}/test.h5".format(tmpdir), "r")
1899 assert modified_data["replace"]["meta_data"]["other"]["test"][0] == b'10'
1900 modified_data.close()
1902 @pytest.mark.executabletest
1903 def test_modify_posterior(self):
1904 """Test that a posterior distribution is correctly modified
1905 """
1906 import h5py
1908 new_posterior = np.random.uniform(10, 0.5, 1000)
1909 np.savetxt("{}/different_posterior.dat".format(tmpdir), new_posterior)
1910 command_line = (
1911 "summarymodify --webdir {0} --samples {0}/test.h5 --delimiter ; "
1912 "--replace_posterior replace;mass_1:{0}/different_posterior.dat".format(
1913 tmpdir
1914 )
1915 )
1916 self.launch(command_line)
1917 modified_data = h5py.File(
1918 "{}/modified_posterior_samples.h5".format(tmpdir), "r"
1919 )
1920 np.testing.assert_almost_equal(
1921 modified_data["replace"]["posterior_samples"]["mass_1"], new_posterior
1922 )
1923 modified_data.close()
1924 command_line = (
1925 "summarymodify --webdir {0} --samples {0}/test.h5 --delimiter ; "
1926 "--replace_posterior replace;abc:{0}/different_posterior.dat".format(
1927 tmpdir
1928 )
1929 )
1930 self.launch(command_line)
1931 modified_data = h5py.File(
1932 "{}/modified_posterior_samples.h5".format(tmpdir), "r"
1933 )
1934 np.testing.assert_almost_equal(
1935 modified_data["replace"]["posterior_samples"]["abc"], new_posterior
1936 )
1937 modified_data.close()
    @pytest.mark.executabletest
    def test_remove_label(self):
        """Test that an analysis is correctly removed
        """
        from pesummary.io import read
        # build three result files in different formats and combine them
        # into one metafile with labels one/two/three
        make_result_file(gw=True, extension="json", outdir=tmpdir)
        os.rename(
            "{}/test.json".format(tmpdir), "{}/example.json".format(tmpdir)
        )
        make_result_file(gw=True, extension="hdf5", outdir=tmpdir)
        os.rename(
            "{}/test.h5".format(tmpdir), "{}/example2.h5".format(tmpdir)
        )
        make_result_file(gw=True, extension="dat", outdir=tmpdir)
        os.rename(
            "{}/test.dat".format(tmpdir), "{}/example3.dat".format(tmpdir)
        )
        command_line = (
            "summarycombine --samples {0}/example.json {0}/example2.h5 "
            "{0}/example3.dat --labels one two three --webdir {0} "
            "--no_conversion".format(tmpdir)
        )
        self.launch(command_line)
        original = read("{}/samples/posterior_samples.h5".format(tmpdir))
        assert all(label in original.labels for label in ["one", "two", "three"])
        # remove the analysis labelled 'one'; the other analyses must
        # survive with their samples untouched
        command_line = (
            "summarymodify --samples {0}/samples/posterior_samples.h5 "
            "--remove_label one --webdir {0}".format(tmpdir)
        )
        self.launch(command_line)
        f = read("{}/modified_posterior_samples.h5".format(tmpdir))
        assert "one" not in f.labels
        assert all(label in f.labels for label in ["two", "three"])
        _original_samples = original.samples_dict
        _samples = f.samples_dict
        for label in ["two", "three"]:
            np.testing.assert_almost_equal(
                _original_samples[label].samples, _samples[label].samples
            )
        # NOTE(review): this command is built but never launched, so the
        # asserts below simply re-check the output of the previous
        # summarymodify call -- confirm whether a `self.launch(command_line)`
        # is missing here (and what --remove_label does when the given
        # label matches no analysis) before "fixing" it
        command_line = (
            "summarymodify --samples {0}/samples/posterior_samples.h5 "
            "--remove_label example --webdir {0}".format(tmpdir)
        )
        f = read("{}/modified_posterior_samples.h5".format(tmpdir))
        assert "one" not in f.labels
        assert all(label in f.labels for label in ["two", "three"])
1986 @pytest.mark.executabletest
1987 def test_remove_posterior(self):
1988 """Test that a posterior is correctly removed
1989 """
1990 import h5py
1992 command_line = (
1993 "summarymodify --webdir {0} --samples {0}/test.h5 --delimiter ; "
1994 "--remove_posterior replace;mass_1".format(tmpdir)
1995 )
1996 self.launch(command_line)
1997 original_data = h5py.File("{}/test.h5".format(tmpdir), "r")
1998 params = list(original_data["replace"]["posterior_samples"]["parameter_names"])
1999 if isinstance(params[0], bytes):
2000 params = [param.decode("utf-8") for param in params]
2001 assert "mass_1" in params
2002 modified_data = h5py.File(
2003 "{}/modified_posterior_samples.h5".format(tmpdir), "r"
2004 )
2005 assert "mass_1" not in modified_data["replace"]["posterior_samples"].dtype.names
2006 original_data.close()
2007 modified_data.close()
2009 @pytest.mark.executabletest
2010 def test_remove_multiple_posteriors(self):
2011 """Test that multiple posteriors are correctly removed
2012 """
2013 import h5py
2015 command_line = (
2016 "summarymodify --webdir {0} --samples {0}/test.h5 --delimiter ; "
2017 "--remove_posterior replace;mass_1 replace;mass_2".format(
2018 tmpdir
2019 )
2020 )
2021 self.launch(command_line)
2022 original_data = h5py.File("{}/test.h5".format(tmpdir), "r")
2023 params = list(original_data["replace"]["posterior_samples"]["parameter_names"])
2024 if isinstance(params[0], bytes):
2025 params = [param.decode("utf-8") for param in params]
2026 assert "mass_1" in params
2027 assert "mass_2" in params
2028 modified_data = h5py.File(
2029 "{}/modified_posterior_samples.h5".format(tmpdir), "r"
2030 )
2031 assert "mass_1" not in modified_data["replace"]["posterior_samples"].dtype.names
2032 assert "mass_2" not in modified_data["replace"]["posterior_samples"].dtype.names
2033 original_data.close()
2034 modified_data.close()
2036 @pytest.mark.executabletest
2037 def test_store_skymap(self):
2038 """Test that multiple skymaps are correctly stored
2039 """
2040 import astropy_healpix as ah
2041 from ligo.skymap.io.fits import write_sky_map
2042 import h5py
2044 nside = 128
2045 npix = ah.nside_to_npix(nside)
2046 prob = np.random.random(npix)
2047 prob /= sum(prob)
2049 write_sky_map(
2050 '{}/test.fits'.format(tmpdir), prob,
2051 objid='FOOBAR 12345',
2052 gps_time=10494.3,
2053 creator="test",
2054 origin='LIGO Scientific Collaboration',
2055 )
2056 command_line = (
2057 "summarymodify --webdir {0} --samples {0}/test.h5 "
2058 "--store_skymap replace:{0}/test.fits".format(tmpdir)
2059 )
2060 self.launch(command_line)
2061 modified_data = h5py.File(
2062 "{}/modified_posterior_samples.h5".format(tmpdir), "r"
2063 )
2064 assert "skymap" in modified_data["replace"].keys()
2065 np.testing.assert_almost_equal(
2066 modified_data["replace"]["skymap"]["data"], prob
2067 )
2068 np.testing.assert_almost_equal(
2069 modified_data["replace"]["skymap"]["meta_data"]["gps_time"][0], 10494.3
2070 )
2071 _creator = modified_data["replace"]["skymap"]["meta_data"]["creator"][0]
2072 if isinstance(_creator, bytes):
2073 _creator = _creator.decode("utf-8")
2074 assert _creator == "test"
2076 command_line = (
2077 "summarymodify --webdir {0} "
2078 "--samples {0}/modified_posterior_samples.h5 "
2079 "--store_skymap replace:{0}/test.fits --force_replace".format(
2080 tmpdir
2081 )
2082 )
2083 self.launch(command_line)
2084 command_line = (
2085 "summarypages --webdir {0}/webpage --gw --no_conversion "
2086 "--samples {0}/modified_posterior_samples.h5 ".format(tmpdir)
2087 )
2088 self.launch(command_line)
2089 data = h5py.File(
2090 "{}/webpage/samples/posterior_samples.h5".format(tmpdir), "r"
2091 )
2092 np.testing.assert_almost_equal(data["replace"]["skymap"]["data"], prob)
2093 data.close()
2094 with pytest.raises(ValueError):
2095 command_line = (
2096 "summarymodify --webdir {0} "
2097 "--samples {0}/modified_posterior_samples.h5 "
2098 "--store_skymap replace:{0}/test.fits".format(tmpdir)
2099 )
2100 self.launch(command_line)
    @pytest.mark.executabletest
    def test_modify(self):
        """Test the `summarymodify` script
        """
        import h5py

        # relabel the 'replace' analysis to 'new'
        command_line = (
            "summarymodify --webdir {0} --samples {0}/test.h5 "
            "--labels replace:new".format(tmpdir)
        )
        self.launch(command_line)
        modified_data = h5py.File(
            "{}/modified_posterior_samples.h5".format(tmpdir), "r"
        )
        data = h5py.File("{}/test.h5".format(tmpdir), "r")
        # the old label must be gone and the new label present
        assert "replace" not in list(modified_data.keys())
        assert "new" in list(modified_data.keys())
        # every dataset under the relabelled group must match the original
        # file entry-for-entry
        for key in data["replace"].keys():
            assert key in modified_data["new"].keys()
            for i, j in zip(data["replace"][key], modified_data["new"][key]):
                try:
                    if isinstance(data["replace"][key][i],h5py._hl.dataset.Dataset):
                        try:
                            # 1d datasets: compare element by element
                            assert all(k == l for k, l in zip(
                                data["replace"][key][i],
                                modified_data["new"][key][j]
                            ))
                        except ValueError:
                            # 2d datasets raise ValueError on elementwise
                            # truth testing; compare row by row instead
                            assert all(
                                all(m == n for m, n in zip(k, l)) for k, l in zip(
                                    data["replace"][key][i],
                                    modified_data["new"][key][j]
                                )
                            )
                except TypeError:
                    # entries that cannot be indexed by name (e.g. scalar
                    # datasets) are skipped
                    pass
        data.close()
        modified_data.close()
class TestSummaryRecreate(Base):
    """Test the `summaryrecreate` executable
    """
    def setup_method(self):
        """Setup the SummaryRecreate class

        Writes a PESummary metafile containing a LALInference config and a
        GW150914.txt trigger file in the current working directory.
        """
        import configparser

        if not os.path.isdir(tmpdir):
            os.mkdir(tmpdir)
        config = configparser.ConfigParser()
        config.optionxform = str
        config.read(data_dir + "/config_lalinference.ini")
        config_dictionary = dict(config._sections)
        config_dictionary["paths"]["webdir"] = (
            "./{}/webdir".format(getuser())
        )
        make_result_file(
            pesummary=True, pesummary_label="recreate", extension="hdf5",
            config=config_dictionary, outdir=tmpdir
        )
        with open("GW150914.txt", "w") as f:
            f.writelines(["115"])

    def teardown_method(self):
        """Remove the files and directories created from this class
        """
        if os.path.isdir(tmpdir):
            shutil.rmtree(tmpdir)
        # bug fix: test_recreate also writes to '{tmpdir}_modify' and
        # setup_method drops GW150914.txt in the CWD -- remove both so the
        # test run leaves no artifacts behind
        if os.path.isdir("{}_modify".format(tmpdir)):
            shutil.rmtree("{}_modify".format(tmpdir))
        if os.path.isfile("GW150914.txt"):
            os.remove("GW150914.txt")

    @pytest.mark.executabletest
    def test_recreate(self):
        """Test the `summaryrecreate` script

        Runs summaryrecreate once without and once with `--config_override`
        and checks the recreated config against the original.
        """
        import configparser

        command_line = (
            "summaryrecreate --rundir {0} --samples {0}/test.h5 ".format(
                tmpdir
            )
        )
        self.launch(command_line)
        # the expected run directory structure must be recreated
        assert os.path.isdir(os.path.join(tmpdir, "recreate"))
        assert os.path.isfile(os.path.join(tmpdir, "recreate", "config.ini"))
        assert os.path.isdir(os.path.join(tmpdir, "recreate", "outdir"))
        assert os.path.isdir(os.path.join(tmpdir, "recreate", "outdir", "caches"))
        config = configparser.ConfigParser()
        config.read(os.path.join(tmpdir, "recreate", "config.ini"))
        original_config = configparser.ConfigParser()
        original_config.read(data_dir + "/config_lalinference.ini")
        # without overrides the recreated config must match the original
        for a, b in zip(
            sorted(config.sections()), sorted(original_config.sections())
        ):
            assert a == b
            for key, item in config[a].items():
                assert config[b][key] == item
        command_line = (
            "summaryrecreate --rundir {0}_modify --samples {0}/test.h5 "
            "--config_override approx:IMRPhenomPv3HM srate:4096".format(
                tmpdir
            )
        )
        self.launch(command_line)
        config = configparser.ConfigParser()
        config.read(os.path.join("{}_modify".format(tmpdir), "recreate", "config.ini"))
        original_config = configparser.ConfigParser()
        original_config.read(data_dir + "/config_lalinference.ini")
        # with overrides, only the overridden keys (and webdir) may differ
        for a, b in zip(
            sorted(config.sections()), sorted(original_config.sections())
        ):
            assert a == b
            for key, item in config[a].items():
                if key == "approx":
                    assert original_config[b][key] != item
                    assert config[b][key] == "IMRPhenomPv3HM"
                elif key == "srate":
                    assert original_config[b][key] != item
                    assert config[b][key] == "4096"
                elif key == "webdir":
                    pass
                else:
                    assert original_config[b][key] == item
class TestSummaryCompare(Base):
    """Test the SummaryCompare executable
    """
    def setup_method(self):
        """Create the temporary directory used by these tests
        """
        if not os.path.isdir(tmpdir):
            os.mkdir(tmpdir)

    def teardown_method(self):
        """Delete the temporary directory and everything inside it
        """
        if os.path.isdir(tmpdir):
            shutil.rmtree(tmpdir)

    @pytest.mark.executabletest
    def test_example_in_docs(self):
        """Test that the code runs for the example in the docs
        """
        import numpy as np
        from pesummary.io import write

        # two result files with overlapping parameter sets
        write(
            ["a", "b", "c", "d"], np.random.random([100, 4]),
            file_format="dat", outdir=tmpdir, filename="example1.dat"
        )
        write(
            ["a", "b", "c", "d", "e"], np.random.random([100, 5]),
            file_format="json", outdir=tmpdir, filename="example2.json"
        )
        self.launch(
            f"summarycompare --samples {tmpdir}/example1.dat "
            f"{tmpdir}/example2.json --properties_to_compare posterior_samples "
            f"-v --generate_comparison_page --webdir {tmpdir}"
        )
class TestSummaryJSCompare(Base):
    """Test the `summaryjscompare` executable
    """
    def setup_method(self):
        """Create every directory these tests write into
        """
        self.dirs = [tmpdir]
        for directory in self.dirs:
            if not os.path.isdir(directory):
                os.mkdir(directory)

    def teardown_method(self):
        """Delete every directory created by setup_method
        """
        for directory in self.dirs:
            if os.path.isdir(directory):
                shutil.rmtree(directory)

    @pytest.mark.executabletest
    def test_runs_on_core_file(self):
        """Test that the code successfully generates a plot for 2 core result files
        """
        # make two bilby result files with non-GW parameters
        for name in ("bilby.json", "bilby2.json"):
            make_result_file(outdir=tmpdir, bilby=True, gw=False)
            os.rename(f"{tmpdir}/test.json", f"{tmpdir}/{name}")
        self.launch(
            f"summaryjscompare --event test-bilby1-bilby2 --main_keys a b c d "
            f"--webdir {tmpdir} --samples {tmpdir}/bilby.json "
            f"{tmpdir}/bilby2.json --labels bilby1 bilby2"
        )

    @pytest.mark.executabletest
    def test_runs_on_gw_file(self):
        """Test that the code successfully generates a plot for 2 gw result files
        """
        # one bilby GW file and one LALInference file
        make_result_file(outdir=tmpdir, bilby=True, gw=True)
        os.rename(f"{tmpdir}/test.json", f"{tmpdir}/bilby.json")
        make_result_file(outdir=tmpdir, lalinference=True)
        os.rename(f"{tmpdir}/test.hdf5", f"{tmpdir}/lalinference.hdf5")
        self.launch(
            f"summaryjscompare --event test-bilby-lalinf --main_keys mass_1 "
            f"mass_2 a_1 a_2 --webdir {tmpdir} --samples {tmpdir}/bilby.json "
            f"{tmpdir}/lalinference.hdf5 --labels bilby lalinf"
        )