Coverage for pesummary/tests/executable_test.py: 98.7%
1187 statements
« prev ^ index » next — coverage.py v7.4.4, created at 2024-12-09 22:34 +0000
1# License under an MIT style license -- see LICENSE.md
3import os
4import shutil
5import glob
6import subprocess
7from getpass import getuser
8import numpy as np
10from .base import (
11 make_result_file, get_list_of_plots, get_list_of_files, data_dir
12)
13import pytest
14from pesummary.utils.exceptions import InputError
15import importlib
16import tempfile
17from pathlib import Path
# NOTE: the TemporaryDirectory object is not kept, so the directory it creates
# is removed almost immediately by its finalizer; only the unique hidden
# ".xxxxxxxx" name is retained. Each test class mkdirs/rmtrees this name in
# its own setup_method/teardown_method.
tmpdir = Path(tempfile.TemporaryDirectory(prefix=".", dir=".").name).name

__author__ = ["Charlie Hoy <charlie.hoy@ligo.org>"]
class Base(object):
    """Base class for testing the executables
    """
    def launch(self, command_line):
        """Run a pesummary CLI module with the given command line

        The first token selects the ``pesummary.cli`` module to run; the
        remaining tokens are forwarded to its ``main`` entry point.
        """
        tokens = command_line.split(" ")
        executable, arguments = tokens[0], tokens[1:]
        module = importlib.import_module("pesummary.cli.{}".format(executable))
        # NOTE: this debug print is relied upon by TestSummaryVersion, which
        # indexes line 1 of the captured stdout — do not remove it
        print(arguments)
        # drop empty tokens produced by consecutive spaces before dispatching
        return module.main(args=[arg for arg in arguments if arg not in ("", " ")])
class TestSummaryVersion(Base):
    """Test the `summaryversion` executable
    """
    @pytest.mark.executabletest
    def test_summaryversion(self):
        """Test the `summaryversion` output matches pesummary.__version__
        """
        from pesummary import __version__
        import io
        from contextlib import redirect_stdout

        f = io.StringIO()
        with redirect_stdout(f):
            self.launch("summaryversion")
        out = f.getvalue()
        # index 1 because Base.launch prints the argument list first, so the
        # version string is the second line of the captured stdout
        assert out.split("\n")[1] == __version__
class TestSummaryGracedb(Base):
    """Test the `summarygracedb` executable with trivial examples

    NOTE(review): these tests query the live GraceDB service and therefore
    require network access.
    """
    def setup_method(self):
        """Setup the SummaryGracedb class
        """
        # scratch directory for the downloaded event data
        if not os.path.isdir(tmpdir):
            os.mkdir(tmpdir)

    def teardown_method(self):
        """Remove the files and directories created from this class
        """
        if os.path.isdir(tmpdir):
            shutil.rmtree(tmpdir)

    @pytest.mark.executabletest
    def test_fake_event(self):
        """Test that `summarygracedb` fails when a fake event is provided
        """
        from ligo.gracedb import exceptions
        # S111111m is not a real superevent, so GraceDB responds with an
        # HTTP error which summarygracedb propagates
        command_line = "summarygracedb --id S111111m"
        with pytest.raises(exceptions.HTTPError):
            self.launch(command_line)

    @pytest.mark.executabletest
    def test_output(self):
        """Test the output from summarygracedb
        """
        import json
        # full download of the S190412m superevent metadata
        command_line = (
            f"summarygracedb --id S190412m --output {tmpdir}/output.json"
        )
        self.launch(command_line)
        with open(f"{tmpdir}/output.json", "r") as f:
            data = json.load(f)
        assert data["superevent_id"] == "S190412m"
        assert "em_type" in data.keys()
        # restrict the output to a subset of properties with `--info`
        command_line = (
            f"summarygracedb --id S190412m --output {tmpdir}/output2.json "
            "--info superevent_id far created"
        )
        self.launch(command_line)
        with open(f"{tmpdir}/output2.json", "r") as f:
            data2 = json.load(f)
        assert len(data2) == 3
        assert all(
            info in data2.keys() for info in ["superevent_id", "far", "created"]
        )
        # the restricted download must agree with the full one
        assert data2["superevent_id"] == data["superevent_id"]
        assert data2["far"] == data["far"]
        assert data2["created"] == data["created"]
class TestSummaryDetchar(Base):
    """Test the `summarydetchar` executable with trivial examples
    """
    def setup_method(self):
        """Setup the SummaryDetchar class

        Write a short random H1 strain series to a GWF file and an L1 series
        to an HDF5 file so both frame-file readers are exercised.
        """
        from gwpy.timeseries import TimeSeries
        if not os.path.isdir(tmpdir):
            os.mkdir(tmpdir)

        H1_series = TimeSeries(
            np.random.uniform(-1, 1, 1000), t0=101, dt=0.1, name="H1:test"
        )
        H1_series.write(f"{tmpdir}/H1.gwf", format="gwf")
        L1_series = TimeSeries(
            np.random.uniform(-1, 1, 1000), t0=101, dt=0.1, name="L1:test"
        )
        L1_series.write(f"{tmpdir}/L1.hdf", format="hdf5")

    def teardown_method(self):
        """Remove the files and directories created from this class
        """
        if os.path.isdir(tmpdir):
            shutil.rmtree(tmpdir)

    @pytest.mark.executabletest
    def test_spectrogram(self):
        """Check that a spectrogram can be generated from the `summarydetchar`
        executable
        """
        # fix: the unused `from gwpy.timeseries import TimeSeries` import has
        # been removed; the strain files were already written in setup_method
        from matplotlib import rcParams

        # LaTeX may not be available on the test machine
        rcParams["text.usetex"] = False
        command_line = (
            f"summarydetchar --gwdata H1:test:{tmpdir}/H1.gwf L1:test:{tmpdir}/L1.hdf "
            f"--webdir {tmpdir} --plot spectrogram"
        )
        self.launch(command_line)
        assert os.path.isfile(f"{tmpdir}/spectrogram_H1.png")
        assert os.path.isfile(f"{tmpdir}/spectrogram_L1.png")

    @pytest.mark.executabletest
    def test_omegascan(self):
        """Check that an omegascan can be generated from the `summarydetchar`
        executable
        """
        # fix: removed the unused `from gwpy.timeseries import TimeSeries`
        command_line = (
            f"summarydetchar --gwdata H1:test:{tmpdir}/H1.gwf L1:test:{tmpdir}/L1.hdf "
            f"--webdir {tmpdir} --plot omegascan --gps 150 --window 0.1"
        )
        self.launch(command_line)
        assert os.path.isfile(f"{tmpdir}/omegascan_H1.png")
        assert os.path.isfile(f"{tmpdir}/omegascan_L1.png")
class TestSummaryPublication(Base):
    """Test the `summarypublication` executable with trivial examples
    """
    def setup_method(self):
        """Setup the SummaryPublication class
        """
        if not os.path.isdir(tmpdir):
            os.mkdir(tmpdir)
        make_result_file(bilby=True, gw=True, outdir=tmpdir)
        os.rename(f"{tmpdir}/test.json", f"{tmpdir}/bilby.json")

    def teardown_method(self):
        """Remove the files and directories created from this class
        """
        if os.path.isdir(tmpdir):
            shutil.rmtree(tmpdir)

    def _launch_and_check(self, plot_options, expected_plot):
        """Run `summarypublication` with the given plot options and assert
        that the expected plot was written to the web directory
        """
        command_line = (
            f"summarypublication --webdir {tmpdir} --samples {tmpdir}/bilby.json "
            "--labels test " + plot_options + " --palette colorblind"
        )
        self.launch(command_line)
        assert os.path.isfile(os.path.join(tmpdir, expected_plot))

    @pytest.mark.executabletest
    def test_2d_contour(self):
        """Test the 2d contour plot generation
        """
        self._launch_and_check(
            "--parameters mass_1 mass_2 --levels 0.9 0.5 --plot 2d_contour",
            "2d_contour_plot_mass_1_and_mass_2.png"
        )

    @pytest.mark.executabletest
    def test_violin(self):
        """Test the violin plot generation
        """
        self._launch_and_check(
            "--parameters mass_1 --plot violin", "violin_plot_mass_1.png"
        )

    @pytest.mark.executabletest
    def test_spin_disk(self):
        """Test the spin disk generation
        """
        self._launch_and_check(
            "--parameters mass_1 --plot spin_disk", "spin_disk_plot_test.png"
        )
class TestSummaryPipe(Base):
    """Test the `summarypipe` executable with trivial examples
    """
    def setup_method(self):
        """Setup the SummaryPipe class

        Create mock lalinference and bilby run directories with the layout
        that `summarypipe` expects, and place a posterior sample file in each.
        """
        self.dirs = [
            tmpdir, "{}/lalinference".format(tmpdir), "{}/bilby".format(tmpdir),
            "{}/lalinference/posterior_samples".format(tmpdir),
            "{}/lalinference/ROQdata".format(tmpdir),
            "{}/lalinference/engine".format(tmpdir),
            "{}/lalinference/caches".format(tmpdir),
            "{}/lalinference/log".format(tmpdir),
            "{}/bilby/data".format(tmpdir), "{}/bilby/result".format(tmpdir),
            "{}/bilby/submit".format(tmpdir),
            "{}/bilby/log_data_analysis".format(tmpdir)
        ]
        for dd in self.dirs:
            if not os.path.isdir(dd):
                os.mkdir(dd)
        # lalinference-style posterior file
        make_result_file(
            gw=False, lalinference=True,
            outdir="{}/lalinference/posterior_samples/".format(tmpdir)
        )
        os.rename(
            "{}/lalinference/posterior_samples/test.hdf5".format(tmpdir),
            "{}/lalinference/posterior_samples/posterior_HL_result.hdf5".format(tmpdir)
        )
        # bilby-style result file
        make_result_file(
            gw=False, bilby=True, outdir="{}/bilby/result/".format(tmpdir)
        )
        os.rename(
            "{}/bilby/result/test.json".format(tmpdir),
            "{}/bilby/result/label_result.json".format(tmpdir)
        )

    def add_config_file(self):
        # copy the reference config files into each mock run directory
        shutil.copyfile(
            os.path.join(data_dir, "config_lalinference.ini"),
            "{}/lalinference/config.ini".format(tmpdir)
        )
        shutil.copyfile(
            os.path.join(data_dir, "config_bilby.ini"),
            "{}/bilby/config.ini".format(tmpdir)
        )

    def teardown_method(self):
        """Remove the files and directories created from this class
        """
        for dd in self.dirs:
            if os.path.isdir(dd):
                shutil.rmtree(dd)

    @pytest.mark.executabletest
    def test_no_config(self):
        """Test that the code fails if there is no config file in the
        directory
        """
        for _type in ["lalinference", "bilby"]:
            command_line = "summarypipe --rundir {}/{}".format(tmpdir, _type)
            with pytest.raises(FileNotFoundError):
                self.launch(command_line)

    @pytest.mark.executabletest
    def test_no_samples(self):
        """Test that the code fails if there are no posterior samples in the
        directory
        """
        self.add_config_file()
        for _type in ["lalinference", "bilby"]:
            # delete the sample file created by setup_method for this backend
            if _type == "lalinference":
                os.remove(
                    "{}/{}/posterior_samples/posterior_HL_result.hdf5".format(
                        tmpdir, _type
                    )
                )
            else:
                os.remove(
                    "{}/{}/result/label_result.json".format(tmpdir, _type)
                )
            command_line = "summarypipe --rundir {}/{}".format(tmpdir, _type)
            with pytest.raises(FileNotFoundError):
                self.launch(command_line)

    @pytest.mark.executabletest
    def test_basic(self):
        """Test that the code runs for a trivial example
        """
        self.add_config_file()
        for _type in ["lalinference", "bilby"]:
            command_line = (
                "summarypipe --rundir {}/{} --return_string".format(tmpdir, _type)
            )
            output = self.launch(command_line)
            # the generated summarypages command must reference the config,
            # the samples and the standard metadata options
            assert "--config" in output
            print(output)
            print("{}/{}/config.ini".format(tmpdir, _type))
            assert "{}/{}/config.ini".format(tmpdir, _type) in output
            assert "--samples" in output
            if _type == "lalinference":
                _f = (
                    "{}/{}/posterior_samples/posterior_HL_result.hdf5".format(
                        tmpdir, _type
                    )
                )
            else:
                _f = "{}/{}/result/label_result.json".format(tmpdir, _type)
            assert _f in output
            assert "--webdir" in output
            assert "--approximant" in output
            assert "--labels" in output

    @pytest.mark.executabletest
    def test_override(self):
        """Test that when you provide an option from the command line it
        overrides the one inferred from the rundir
        """
        self.add_config_file()
        command_line = (
            "summarypipe --rundir {}/lalinference --return_string".format(tmpdir)
        )
        output = self.launch(command_line)
        command_line += " --labels hello"
        output2 = self.launch(command_line)
        assert output != output2
        # pull out the value that follows '--labels' in each generated command
        label = output.split(" ")[output.split(" ").index("--labels") + 1]
        label2 = output2.split(" ")[output2.split(" ").index("--labels") + 1]
        assert label != label2
        assert label2 == "hello"

    @pytest.mark.executabletest
    def test_add_to_summarypages_command(self):
        """Test that when you provide an option from the command line that
        is not already in the summarypages command line, it adds it to the one
        inferred from the rundir
        """
        self.add_config_file()
        command_line = (
            "summarypipe --rundir {}/lalinference --return_string".format(tmpdir)
        )
        output = self.launch(command_line)
        command_line += " --multi_process 10 --kde_plot --cosmology Planck15_lal"
        output2 = self.launch(command_line)
        assert output != output2
        # the extra options appear only in the second generated command
        assert "--multi_process 10" in output2
        assert "--cosmology Planck15_lal" in output2
        assert "--kde_plot" in output2
        assert "--multi_process 10" not in output
        assert "--cosmology Planck15_lal" not in output
        assert "--kde_plot" not in output
class TestSummaryPages(Base):
    """Test the `summarypages` executable with trivial examples
    """
    def setup_method(self):
        """Setup the SummaryPages class
        """
        # main scratch dir plus two extras used by the summarycombine test
        self.dirs = [tmpdir, "{}1".format(tmpdir), "{}2".format(tmpdir)]
        for dd in self.dirs:
            if not os.path.isdir(dd):
                os.mkdir(dd)
        make_result_file(outdir=tmpdir, gw=False, extension="json")
        os.rename("{}/test.json".format(tmpdir), "{}/example.json".format(tmpdir))
        make_result_file(outdir=tmpdir, gw=False, extension="hdf5")
        os.rename("{}/test.h5".format(tmpdir), "{}/example2.h5".format(tmpdir))

    def teardown_method(self):
        """Remove the files and directories created from this class
        """
        for dd in self.dirs:
            if os.path.isdir(dd):
                shutil.rmtree(dd)

    def check_output(
        self, number=1, mcmc=False, existing_plot=False, expert=False,
        gw=False
    ):
        """Check the output from the summarypages executable

        Compares the produced plots/html pages against the expected lists
        from `get_list_of_plots`/`get_list_of_files`.
        """
        assert os.path.isfile("{}/home.html".format(tmpdir))
        plots = get_list_of_plots(
            gw=gw, number=number, mcmc=mcmc, existing_plot=existing_plot,
            expert=expert, outdir=tmpdir
        )
        # debug aid: show the expected/actual plot pairs before asserting
        for i, j in zip(
            sorted(plots), sorted(glob.glob("{}/plots/*.png".format(tmpdir)))
        ):
            print(i, j)
        assert all(
            i == j for i, j in zip(
                sorted(plots), sorted(glob.glob("{}/plots/*.png".format(tmpdir)))
            )
        )
        files = get_list_of_files(
            gw=gw, number=number, existing_plot=existing_plot, outdir=tmpdir
        )
        assert all(
            i == j for i, j in zip(
                sorted(files), sorted(glob.glob("{}/html/*.html".format(tmpdir)))
            )
        )

    @pytest.mark.executabletest
    def test_descriptions(self):
        """Check that summarypages stores the correct descriptions when the
        `--descriptions` flag is provided
        """
        import json
        from pesummary.io import read
        # descriptions passed directly on the command line
        command_line = (
            "summarypages --webdir {0} --samples {0}/example.json "
            "{0}/example.json --labels core0 core1 --nsamples 100 "
            "--disable_corner --descriptions core0:Description".format(tmpdir)
        )
        self.launch(command_line)
        opened = read("{}/samples/posterior_samples.h5".format(tmpdir))
        assert opened.description["core0"] == "Description"
        assert opened.description["core1"] == "No description found"
        # descriptions provided via a json file
        with open("{}/descriptions.json".format(tmpdir), "w") as f:
            json.dump({"core0": "Testing description", "core1": "Test"}, f)
        command_line = (
            "summarypages --webdir {0} --samples {0}/example.json "
            "{0}/example.json --labels core0 core1 --nsamples 100 "
            "--disable_corner --descriptions {0}/descriptions.json".format(tmpdir)
        )
        self.launch(command_line)
        opened = read("{}/samples/posterior_samples.h5".format(tmpdir))
        assert opened.description["core0"] == "Testing description"
        assert opened.description["core1"] == "Test"

    @pytest.mark.executabletest
    def test_reweight(self):
        """Check that summarypages reweights the posterior samples if the
        `--reweight_samples` flag is provided
        """
        from pesummary.io import read
        make_result_file(gw=True, extension="json", outdir=tmpdir)
        command_line = (
            "summarypages --webdir {0} --samples {0}/test.json --gw "
            "--labels gw0 --nsamples 100 --disable_corner "
            "--reweight_samples uniform_in_comoving_volume ".format(tmpdir)
        )
        self.launch(command_line)
        self.check_output(number=1, expert=False, gw=True)
        original = read("{0}/test.json".format(tmpdir)).samples_dict
        _reweighted = read("{0}/samples/posterior_samples.h5".format(tmpdir))
        reweighted = _reweighted.samples_dict
        # reweighting can only ever discard samples
        assert original.number_of_samples >= reweighted["gw0"].number_of_samples
        inds = np.array([
            original.parameters.index(param) for param in
            reweighted["gw0"].parameters if param in original.parameters
        ])
        # every reweighted sample must come from the original set
        assert all(
            reweighted_sample[inds] in original.samples.T for reweighted_sample
            in reweighted["gw0"].samples.T
        )
        _kwargs = _reweighted.extra_kwargs[0]
        assert _kwargs["sampler"]["nsamples_before_reweighting"] == 100
        assert _kwargs["sampler"]["nsamples"] == reweighted["gw0"].number_of_samples
        assert _kwargs["meta_data"]["reweighting"] == "uniform_in_comoving_volume"

    @pytest.mark.executabletest
    def test_checkpoint(self):
        """Check that when restarting from checkpoint, the outputs are
        consistent
        """
        import time
        command_line = (
            "summarypages --webdir {0} --samples {0}/example.json "
            "--labels core0 --nsamples 100 "
            "--restart_from_checkpoint".format(tmpdir)
        )
        t0 = time.time()
        self.launch(command_line)
        t1 = time.time()
        assert os.path.isfile("{}/checkpoint/pesummary_resume.pickle".format(tmpdir))
        self.check_output(number=1, expert=False)
        t2 = time.time()
        self.launch(command_line)
        t3 = time.time()
        # the resumed run should be faster than the run from scratch
        assert t3 - t2 < t1 - t0
        self.check_output(number=1, expert=False)
        # get timestamp of plot
        made_time = os.path.getmtime(glob.glob("{}/plots/*.png".format(tmpdir))[0])
        # the plots must not have been regenerated by the resumed run
        assert made_time < t2

    @pytest.mark.executabletest
    def test_expert(self):
        """Check that summarypages produces the expected expert diagnostic
        plots
        """
        command_line = (
            "summarypages --webdir {0} --samples {0}/example.json "
            "--labels core0 --nsamples 100".format(tmpdir)
        )
        self.launch(command_line)
        self.check_output(number=1, expert=False)
        command_line = (
            "summarypages --webdir {0} --samples {0}/example.json "
            "--labels core0 --nsamples 100 --enable_expert".format(tmpdir)
        )
        self.launch(command_line)
        self.check_output(number=1, expert=True)

    @pytest.mark.executabletest
    def test_prior_input(self):
        """Check that `summarypages` works when a prior file is passed from
        the command line
        """
        import importlib
        from bilby import gw

        path = gw.__path__[0]
        bilby_prior_file = os.path.join(
            path, "prior_files", "GW150914.prior"
        )

        for package in ["core", "gw"]:
            # NOTE: this rebinds the local name `gw` (the bilby module
            # imported above) to a bool; harmless since `path` was already
            # extracted before the loop
            gw = True if package == "gw" else False
            module = importlib.import_module(
                "pesummary.{}.file.read".format(package)
            )
            make_result_file(outdir=tmpdir, gw=gw, extension="json")
            os.rename("{}/test.json".format(tmpdir), "{}/prior.json".format(tmpdir))
            for _file in ["{}/prior.json".format(tmpdir), bilby_prior_file]:
                command_line = (
                    "summarypages --webdir {} --samples {}/example.json "
                    "--labels test --prior_file {} --nsamples_for_prior "
                    "10 ".format(tmpdir, tmpdir, _file)
                )
                command_line += " --gw" if gw else ""
                self.launch(command_line)
                f = module.read("{}/samples/posterior_samples.h5".format(tmpdir))
                if _file != bilby_prior_file:
                    stored = f.priors["samples"]["test"]
                    f = module.read(_file)
                    original = f.samples_dict
                    for param in original.keys():
                        np.testing.assert_almost_equal(
                            original[param], stored[param]
                        )
                        # Non-bilby prior file will have same number or prior
                        # samples as posterior samples
                        assert len(stored[param]) == 1000
                else:
                    from bilby.core.prior import PriorDict

                    analytic = f.priors["analytic"]["test"]
                    bilby_prior = PriorDict(filename=bilby_prior_file)
                    for param, value in bilby_prior.items():
                        assert analytic[param] == str(value)
                    params = list(f.priors["samples"]["test"].keys())
                    # A bilby prior file will have 10 prior samples
                    assert len(f.priors["samples"]["test"][params[0]]) == 10

    @pytest.mark.executabletest
    def test_calibration_and_psd(self):
        """Test that the calibration and psd files are passed appropiately
        """
        from pesummary.gw.file.read import read
        from .base import make_psd, make_calibration

        make_psd(outdir=tmpdir)
        make_calibration(outdir=tmpdir)
        command_line = (
            "summarypages --webdir {0} --samples {0}/example.json "
            "--psd H1:{0}/psd.dat --calibration L1:{0}/calibration.dat "
            "--labels test --posterior_samples_filename example.h5 "
            "--calibration_definition template".format(tmpdir)
        )
        self.launch(command_line)
        f = read("{}/samples/example.h5".format(tmpdir))
        # the stored psd/calibration data must round-trip unchanged
        psd = np.genfromtxt("{}/psd.dat".format(tmpdir))
        calibration = np.genfromtxt("{}/calibration.dat".format(tmpdir))
        np.testing.assert_almost_equal(f.psd["test"]["H1"], psd)
        np.testing.assert_almost_equal(
            f.priors["calibration"]["test"]["L1"], calibration
        )

    @pytest.mark.executabletest
    def test_strain_data(self):
        """Test that the gravitational wave data is passed appropiately
        """
        from pesummary.io import read
        from gwpy.timeseries import TimeSeries

        H1_series = TimeSeries(
            np.random.uniform(-1, 1, 1000), t0=101, dt=0.1, name="H1:test"
        )
        H1_series.write("{}/H1.gwf".format(tmpdir), format="gwf")
        L1_series = TimeSeries(
            np.random.uniform(-1, 1, 1000), t0=201, dt=0.2, name="L1:test"
        )
        L1_series.write("{}/L1.hdf".format(tmpdir), format="hdf5")
        command_line = (
            "summarypages --webdir {0} --samples {0}/example.json "
            "--gwdata H1:test:{0}/H1.gwf L1:test:{0}/L1.hdf "
            "--labels test --disable_corner --disable_interactive".format(tmpdir)
        )
        self.launch(command_line)
        f = read("{}/samples/posterior_samples.h5".format(tmpdir))
        gwdata = f.gwdata
        assert all(IFO in gwdata.detectors for IFO in ["H1", "L1"])
        # the stored strain must match what was written, including metadata
        strain = {"H1": H1_series, "L1": L1_series}
        for IFO in gwdata.detectors:
            np.testing.assert_almost_equal(gwdata[IFO].value, strain[IFO].value)
            assert gwdata[IFO].t0 == strain[IFO].t0
            assert gwdata[IFO].dt == strain[IFO].dt
            assert gwdata[IFO].unit == strain[IFO].unit

    @pytest.mark.executabletest
    def test_gracedb(self):
        """Test that when the gracedb ID is passed from the command line it is
        correctly stored in the meta data
        """
        from pesummary.gw.file.read import read

        command_line = (
            "summarypages --webdir {0} --samples {0}/example.json "
            "--gracedb G17864 --gw --labels test".format(tmpdir)
        )
        self.launch(command_line)
        f = read("{}/samples/posterior_samples.h5".format(tmpdir))
        assert "gracedb" in f.extra_kwargs[0]["meta_data"]
        assert "G17864" == f.extra_kwargs[0]["meta_data"]["gracedb"]["id"]

    @pytest.mark.executabletest
    def test_single(self):
        """Test on a single input
        """
        command_line = (
            "summarypages --webdir {0} --samples "
            "{0}/example.json --label core0 ".format(tmpdir)
        )
        self.launch(command_line)
        self.check_output(number=1)

    @pytest.mark.executabletest
    def test_summarycombine_output(self):
        """Test on a summarycombine output
        """
        from .base import make_psd, make_calibration

        make_psd(outdir=tmpdir)
        make_calibration(outdir=tmpdir)
        # build two single-analysis metafiles, combine them, then run
        # summarypages on the combined file
        command_line = (
            "summarycombine --webdir {0}1 --samples "
            "{0}/example.json --label gw0 "
            "--calibration L1:{0}/calibration.dat --gw".format(tmpdir)
        )
        self.launch(command_line)
        command_line = (
            "summarycombine --webdir {0}2 --samples "
            "{0}/example.json --label gw1 "
            "--psd H1:{0}/psd.dat --gw".format(tmpdir)
        )
        self.launch(command_line)
        command_line = (
            "summarycombine --webdir {0} --gw --samples "
            "{0}1/samples/posterior_samples.h5 "
            "{0}2/samples/posterior_samples.h5 ".format(tmpdir)
        )
        self.launch(command_line)
        command_line = (
            "summarypages --webdir {0} --gw --samples "
            "{0}/samples/posterior_samples.h5 ".format(tmpdir)
        )
        self.launch(command_line)

    @pytest.mark.executabletest
    def test_mcmc(self):
        """Test the `--mcmc_samples` command line argument
        """
        command_line = (
            "summarypages --webdir {0} --samples "
            "{0}/example.json {0}/example2.h5 "
            "--label core0 --mcmc_samples".format(tmpdir)
        )
        self.launch(command_line)
        self.check_output(number=1, mcmc=True)

    @pytest.mark.executabletest
    def test_kde_plot(self):
        """Test that the kde plots work on a single input and on MCMC inputs
        """
        command_line = (
            "summarypages --webdir {0} --samples "
            "{0}/example.json --label core0 --kde_plot "
            "".format(tmpdir)
        )
        self.launch(command_line)
        self.check_output(number=1)
        command_line = (
            "summarypages --webdir {0} --samples "
            "{0}/example.json {0}/example2.h5 "
            "--label core0 --mcmc_samples --kde_plot".format(tmpdir)
        )
        self.launch(command_line)
        self.check_output(number=1, mcmc=True)

    @pytest.mark.executabletest
    def test_mcmc_more_than_label(self):
        """Test that the code fails with the `--mcmc_samples` command line
        argument when multiple labels are passed.
        """
        command_line = (
            "summarypages --webdir {0} --samples "
            "{0}/example.json {0}/example2.h5 "
            "{0}/example.json {0}/example2.h5 "
            "--label core0 core1 --mcmc_samples".format(tmpdir)
        )
        with pytest.raises(InputError):
            self.launch(command_line)

    @pytest.mark.executabletest
    def test_file_format_wrong_number(self):
        """Test that the code fails with the `--file_format` command line
        argument when the number of file formats does not match the number of
        samples
        """
        command_line = (
            "summarypages --webdir {0} --samples "
            "{0}/example.json {0}/example2.h5 "
            "--file_format hdf5 json dat".format(tmpdir)
        )
        with pytest.raises(InputError):
            self.launch(command_line)

    @pytest.mark.executabletest
    def test_add_existing_plot(self):
        """Test that an Additional page is made if existing plots are provided
        to the summarypages executable
        """
        # an empty file is enough; only the path is recorded
        with open("{}/test.png".format(tmpdir), "w") as f:
            f.writelines("")
        command_line = (
            "summarypages --webdir {0} --samples "
            "{0}/example.json --label core0 --add_existing_plot "
            "core0:{0}/test.png ".format(tmpdir)
        )
        self.launch(command_line)
        self.check_output(number=1, existing_plot=True)
        command_line = (
            "summarypages --webdir {0} --samples "
            "{0}/example.json {0}/example.json --label core0 core1 "
            "--add_existing_plot core0:{0}/test.png core1:{0}/test.png "
            "".format(tmpdir)
        )
        self.launch(command_line)
        self.check_output(number=2, existing_plot=True)
class TestSummaryPagesLW(Base):
    """Test the `summarypageslw` executable
    """
    def setup_method(self):
        """Setup the SummaryPagesLW class
        """
        if not os.path.isdir(tmpdir):
            os.mkdir(tmpdir)
        make_result_file(bilby=True, gw=True, outdir=tmpdir)
        os.rename("{}/test.json".format(tmpdir), "{}/bilby.json".format(tmpdir))

    def teardown_method(self):
        """Remove the files and directories created from this class
        """
        if os.path.isdir(tmpdir):
            shutil.rmtree(tmpdir)

    def check_output(
        self, gw=False, number=1, outdir=tmpdir, parameters=None, sections=None,
        extra_gw_plots=True
    ):
        """Check the output from the summarypages executable

        Parameters
        ----------
        parameters: list, optional
            parameters expected in the output; defaults to an empty list
        sections: list, optional
            html sections expected in the output; defaults to an empty list
        """
        # fix: the previous signature used mutable default arguments
        # (`parameters=[], sections=[]`); normalise a None sentinel instead
        parameters = [] if parameters is None else parameters
        sections = [] if sections is None else sections
        assert os.path.isfile("./{}/home.html".format(outdir))
        plots = get_list_of_plots(
            gw=gw, number=number, mcmc=False, existing_plot=False,
            expert=False, parameters=parameters, outdir=outdir,
            extra_gw_plots=extra_gw_plots
        )
        # the produced and expected plot sets must match exactly
        assert all(
            i in plots for i in glob.glob("{}/plots/*.png".format(outdir))
        )
        assert all(
            i in glob.glob("{}/plots/*.png".format(outdir)) for i in plots
        )
        files = get_list_of_files(
            gw=gw, number=number, existing_plot=False, parameters=parameters,
            sections=sections, outdir=outdir, extra_gw_pages=extra_gw_plots
        )
        assert all(
            i in files for i in glob.glob("{}/html/*.html".format(outdir))
        )
        for i in files:
            print(i, i in glob.glob("{}/html/*.html".format(outdir)))
        assert all(
            i in glob.glob("{}/html/*.html".format(outdir)) for i in files
        )

    @pytest.mark.executabletest
    def test_single(self):
        """Test that the `summarypageslw` executable works as expected
        when a single result file is provided
        """
        command_line = (
            "summarypageslw --webdir {0} --samples {0}/bilby.json "
            "--labels core0 --parameters mass_1 mass_2 "
            "".format(tmpdir)
        )
        self.launch(command_line)
        self.check_output(parameters=["mass_1", "mass_2"], sections=["M-P"])
        command_line = (
            "summarypageslw --webdir {0}/gw --samples {0}/bilby.json "
            "--labels gw0 --parameters mass_1 mass_2 "
            "--gw".format(tmpdir)
        )
        self.launch(command_line)
        self.check_output(
            gw=True, parameters=["mass_1", "mass_2"], sections=["masses"],
            outdir="{}/gw".format(tmpdir), extra_gw_plots=False
        )
        # an unknown parameter is silently dropped as long as at least one
        # known parameter remains
        command_line = command_line.replace(
            "{}/gw".format(tmpdir), "{}/gw2".format(tmpdir)
        )
        command_line = command_line.replace("mass_1", "made_up_label")
        self.launch(command_line)
        self.check_output(
            gw=True, parameters=["mass_2"], sections=["masses"],
            outdir="{}/gw2".format(tmpdir), extra_gw_plots=False
        )
        # ...but all-unknown parameters must raise
        with pytest.raises(Exception):
            command_line = command_line.replace("mass_2", "made_up_label2")
            self.launch(command_line)

    @pytest.mark.executabletest
    def test_double(self):
        """Test that the `summarypageslw` executable works as expected
        when multiple result files are provided
        """
        command_line = (
            "summarypageslw --webdir {0} --samples {0}/bilby.json "
            "{0}/bilby.json --labels core0 core1 --parameters mass_1 mass_2 "
            "".format(tmpdir)
        )
        self.launch(command_line)
        self.check_output(
            number=2, parameters=["mass_1", "mass_2"], sections=["M-P"]
        )

    @pytest.mark.executabletest
    def test_pesummary(self):
        """Test that the `summarypageslw` executable works as expected
        for a pesummary metafile
        """
        command_line = (
            "summarycombine --webdir {0} --samples {0}/bilby.json "
            "{0}/bilby.json --no_conversion --gw --labels core0 core1 "
            "--nsamples 100".format(tmpdir)
        )
        self.launch(command_line)
        command_line = (
            "summarypageslw --webdir {0}/lw --samples "
            "{0}/samples/posterior_samples.h5 --parameters mass_1 mass_2 "
            "".format(tmpdir)
        )
        self.launch(command_line)
        self.check_output(
            number=2, parameters=["mass_1", "mass_2"], sections=["M-P"],
            outdir="{}/lw".format(tmpdir)
        )
        command_line = command_line.replace(
            "{}/lw".format(tmpdir), "{}/lw2".format(tmpdir)
        )
        command_line = command_line.replace("mass_1", "made_up_label")
        self.launch(command_line)
        self.check_output(
            number=2, parameters=["mass_2"], sections=["M-P"],
            outdir="{}/lw2".format(tmpdir)
        )
        # combine one gw and one non-gw file: only the analysis containing
        # the requested parameters contributes pages
        make_result_file(bilby=True, gw=False, outdir=tmpdir)
        os.rename("{}/test.json".format(tmpdir), "{}/bilby2.json".format(tmpdir))
        command_line = (
            "summarycombine --webdir {0} --samples {0}/bilby.json "
            "{0}/bilby2.json --no_conversion --gw --labels core0 core1 "
            "--nsamples 100".format(tmpdir)
        )
        self.launch(command_line)
        command_line = (
            "summarypageslw --webdir {0}/lw3 --samples "
            "{0}/samples/posterior_samples.h5 --parameters mass_1 mass_2 "
            "".format(tmpdir)
        )
        self.launch(command_line)
        self.check_output(
            number=1, parameters=["mass_1", "mass_2"], sections=["M-P"],
            outdir="{}/lw3".format(tmpdir)
        )
class TestSummaryClassification(Base):
    """Test the `summaryclassification` executable
    """
    def setup_method(self):
        """Setup the SummaryClassification class
        """
        if not os.path.isdir(tmpdir):
            os.mkdir(tmpdir)
        # pesummary metafile containing a single analysis labelled 'test'
        make_result_file(outdir=tmpdir, pesummary=True, gw=True, pesummary_label="test")
        os.rename(f"{tmpdir}/test.json", f"{tmpdir}/pesummary.json")
        # plain bilby result file
        make_result_file(outdir=tmpdir, bilby=True, gw=True)
        os.rename(f"{tmpdir}/test.json", f"{tmpdir}/bilby.json")

    def teardown_method(self):
        """Remove the files and directories created from this class
        """
        if os.path.isdir(tmpdir):
            shutil.rmtree(tmpdir)

    def check_output(self):
        """Check the output from the `summaryclassification` executable
        """
        import glob
        import json

        produced = glob.glob("{}/*".format(tmpdir))
        json_path = "{}/test_default_prior_pe_classification.json".format(tmpdir)
        assert json_path in produced
        assert "{}/test_default_pepredicates_bar.png".format(tmpdir) in produced
        with open(json_path, "r") as f:
            classification = json.load(f)
        # every source category must be present in the stored probabilities
        for category in ["BNS", "NSBH", "BBH", "MassGap", "HasNS", "HasRemnant"]:
            assert category in classification.keys()

    @pytest.mark.executabletest
    def test_result_file(self):
        """Test the `summaryclassification` executable for a random result file
        """
        command_line = (
            f"summaryclassification --webdir {tmpdir} --samples "
            f"{tmpdir}/bilby.json --prior default --label test"
        )
        self.launch(command_line)
        self.check_output()

    @pytest.mark.executabletest
    def test_pesummary_file(self):
        """Test the `summaryclassification` executable for a pesummary metafile
        """
        command_line = (
            f"summaryclassification --webdir {tmpdir} --samples "
            f"{tmpdir}/pesummary.json --prior default"
        )
        self.launch(command_line)
        self.check_output()
class TestSummaryTGR(Base):
    """Tests for the `summarytgr` executable
    """
    def setup_method(self):
        """Create a pesummary metafile and a bilby result file to analyse
        """
        if not os.path.isdir(tmpdir):
            os.mkdir(tmpdir)
        make_result_file(
            outdir=tmpdir, pesummary=True, gw=True, pesummary_label="test"
        )
        os.rename(f"{tmpdir}/test.json", f"{tmpdir}/pesummary.json")
        make_result_file(outdir=tmpdir, bilby=True, gw=True)
        os.rename(f"{tmpdir}/test.json", f"{tmpdir}/bilby.json")

    def teardown_method(self):
        """Delete everything generated by these tests
        """
        if os.path.isdir(tmpdir):
            shutil.rmtree(tmpdir)

    def check_output(self, diagnostic=True):
        """Verify the plots written by the `summarytgr` executable

        Parameters
        ----------
        diagnostic: bool, optional
            if True, also require the diagnostic plots. Default True
        """
        import glob

        produced = glob.glob(f"{tmpdir}/plots/*")
        template = tmpdir + "/plots/primary_imrct_{}.png"
        expected = ["deviations_triangle_plot"]
        if diagnostic:
            expected += [
                "mass_1_mass_2", "a_1_a_2",
                "final_mass_non_evolved_final_spin_non_evolved"
            ]
        for suffix in expected:
            assert template.format(suffix) in produced

    @pytest.mark.executabletest
    def test_result_file(self):
        """Test the `summarytgr` executable for a random result file
        """
        self.launch(
            f"summarytgr --webdir {tmpdir} "
            f"--samples {tmpdir}/bilby.json {tmpdir}/bilby.json "
            "--test imrct "
            "--labels inspiral postinspiral "
            "--imrct_kwargs N_bins:11 "
            "--make_diagnostic_plots "
            "--disable_pe_page_generation"
        )
        self.check_output()

    @pytest.mark.executabletest
    def test_pesummary_file(self):
        """Test the `summarytgr` executable for a pesummary metafile
        """
        self.launch(
            f"summarytgr --webdir {tmpdir} --samples "
            f"{tmpdir}/pesummary.json {tmpdir}/pesummary.json --labels "
            "test:inspiral test:postinspiral --test imrct --imrct_kwargs "
            "N_bins:11 --disable_pe_page_generation"
        )
        self.check_output(diagnostic=False)

    @pytest.mark.executabletest
    def test_pdfs_and_gr_quantile(self):
        """Test that the GR quantile and pdf matches the LAL implementation
        The LAL files were produced by the executable imrtgr_imr_consistency_test
        with N_bins=201 dMfbyMf_lim=3 dchifbychif_lim=3 and bbh_average_fits_precessing
        """
        from pesummary.io import read

        make_result_file(outdir="./", extension="dat", gw=True, random_seed=123456789)
        os.rename("./test.dat", f"{tmpdir}/inspiral.dat")
        make_result_file(outdir="./", extension="dat", gw=True, random_seed=987654321)
        os.rename("./test.dat", f"{tmpdir}/postinspiral.dat")
        self.launch(
            f"summarytgr --webdir {tmpdir} "
            f"--samples {tmpdir}/inspiral.dat {tmpdir}/postinspiral.dat "
            "--test imrct "
            "--labels inspiral postinspiral "
            "--imrct_kwargs N_bins:201 final_mass_deviation_lim:3 final_spin_deviation_lim:3 "
            "--disable_pe_page_generation"
        )
        metafile = read(f"{tmpdir}/samples/tgr_samples.h5")
        quantile = metafile.extra_kwargs["primary"]["GR Quantile (%)"]
        probdict = metafile.imrct_deviation["final_mass_final_spin_deviations"]
        lal_pdf = np.loadtxt(os.path.join(data_dir, "lal_pdf_for_summarytgr.dat.gz"))
        pdf = probdict.probs / probdict.dx / probdict.dy
        np.testing.assert_almost_equal(quantile, 3.276372814744687306)
        np.testing.assert_almost_equal(pdf, lal_pdf)
class TestSummaryClean(Base):
    """Test the `summaryclean` executable
    """
    def setup_method(self):
        """Setup the TestSummaryClean class
        """
        if not os.path.isdir(tmpdir):
            os.mkdir(tmpdir)

    def teardown_method(self):
        """Remove the files and directories created from this class
        """
        if os.path.isdir(tmpdir):
            shutil.rmtree(tmpdir)

    @pytest.mark.executabletest
    def test_clean(self):
        """Test the `summaryclean` executable

        Write a lalinference-style hdf5 file containing one unphysical
        `mass_ratio` sample (-1.5) and check that `summaryclean` removes it
        """
        import h5py

        parameters = ["mass_ratio"]
        data = [[0.5], [0.5], [-1.5]]
        h5py_data = np.array(
            [tuple(i) for i in data], dtype=[tuple([i, 'float64']) for i in
            parameters]
        )
        # use a context manager so the file is always closed; the previous
        # explicit open/close leaked the handle if a write raised
        with h5py.File("{}/test.hdf5".format(tmpdir), "w") as f:
            lalinference = f.create_group("lalinference")
            nest = lalinference.create_group("lalinference_nest")
            nest.create_dataset("posterior_samples", data=h5py_data)
        command_line = (
            "summaryclean --webdir {0} --samples {0}/test.hdf5 "
            "--file_format dat --labels test".format(tmpdir)
        )
        self.launch(command_line)
        self.check_output()

    def check_output(self):
        """Check the output from the `summaryclean` executable
        """
        from pesummary.gw.file.read import read

        f = read("{}/pesummary_test.dat".format(tmpdir))
        print(f.samples_dict["mass_ratio"])
        # only the two physical samples (0.5) should survive the clean
        assert len(f.samples_dict["mass_ratio"]) == 2
        assert all(i == 0.5 for i in f.samples_dict["mass_ratio"])
class _SummaryCombine_Metafiles(Base):
    """Test the `summarycombine_metafile` executable
    """
    @pytest.mark.executabletest
    def test_combine(self, gw=False):
        """Test the executable for 2 metafiles

        Parameters
        ----------
        gw: bool, optional
            if True, pass the `--gw` flag to `summarycombine`. Default False
        """
        make_result_file(outdir=tmpdir, pesummary=True, pesummary_label="label2")
        os.rename("{}/test.json".format(tmpdir), "{}/test2.json".format(tmpdir))
        make_result_file(outdir=tmpdir, pesummary=True)
        command_line = (
            "summarycombine --webdir {0} "
            "--samples {0}/test.json {0}/test2.json "
            "--save_to_json".format(tmpdir)
        )
        if gw:
            command_line += " --gw"
        self.launch(command_line)

    def check_output(self, gw=False):
        """Check that the combined metafile contains all original samples

        Parameters
        ----------
        gw: bool, optional
            if True, read the files with the gw-specific reader. Default False
        """
        if gw:
            from pesummary.gw.file.read import read
        else:
            from pesummary.core.file.read import read

        assert os.path.isfile("{}/samples/posterior_samples.json".format(tmpdir))
        combined = read("{}/samples/posterior_samples.json".format(tmpdir))
        for f in ["{}/test.json".format(tmpdir), "{}/test2.json".format(tmpdir)]:
            data = read(f)
            labels = data.labels
            assert all(i in combined.labels for i in labels)
            # compare every sample of every parameter for each label. The
            # previous implementation called `range` on the sample array
            # itself (TypeError) and referenced an unbound variable `i`
            # (generator-expression variables do not leak): both are fixed
            # by looping over the labels and using `range(len(...))`
            for label in labels:
                assert all(
                    all(
                        data.samples_dict[j][num] == combined.samples_dict[label][j][num]
                        for num in range(len(data.samples_dict[j]))
                    ) for j in data.samples_dict.keys()
                )
class TestCoreSummaryCombine_Metafiles(_SummaryCombine_Metafiles):
    """Test the `summarycombine_metafile` executable without the `--gw` flag
    """
    def setup_method(self):
        """Create a pesummary metafile to combine
        """
        if not os.path.isdir(tmpdir):
            os.mkdir(tmpdir)
        make_result_file(outdir=tmpdir, pesummary=True)

    def teardown_method(self):
        """Delete everything generated by these tests
        """
        if os.path.isdir(tmpdir):
            shutil.rmtree(tmpdir)

    @pytest.mark.executabletest
    def test_combine(self):
        """Test the executable for 2 metafiles
        """
        super().test_combine(gw=False)

    def check_output(self):
        super().check_output(gw=False)
class TestGWSummaryCombine_Metafiles(_SummaryCombine_Metafiles):
    """Test the `summarycombine_metafile` executable with the `--gw` flag
    """
    def setup_method(self):
        """Create a pesummary metafile to combine
        """
        if not os.path.isdir(tmpdir):
            os.mkdir(tmpdir)
        make_result_file(outdir=tmpdir, pesummary=True)

    def teardown_method(self):
        """Delete everything generated by these tests
        """
        if os.path.isdir(tmpdir):
            shutil.rmtree(tmpdir)

    @pytest.mark.executabletest
    def test_combine(self):
        """Test the executable for 2 metafiles
        """
        super().test_combine(gw=True)

    def check_output(self, gw=True):
        super().check_output(gw=True)
class TestSummaryCombine(Base):
    """Test the `summarycombine` executable
    """
    def setup_method(self):
        """Setup the SummaryCombine class
        """
        self.dirs = [tmpdir]
        for dd in self.dirs:
            if not os.path.isdir(dd):
                os.mkdir(dd)

    def teardown_method(self):
        """Remove the files and directories created from this class
        """
        for dd in self.dirs:
            if os.path.isdir(dd):
                shutil.rmtree(dd)

    @pytest.mark.executabletest
    def test_disable_prior_sampling(self):
        """Test that the code skips prior sampling when the appropiate flag
        is provided to the `summarypages` executable
        """
        from pesummary.io import read

        make_result_file(outdir=tmpdir, bilby=True, gw=False)
        os.rename("{}/test.json".format(tmpdir), "{}/bilby.json".format(tmpdir))
        command_line = (
            "summarycombine --webdir {0} --samples {0}/bilby.json "
            "--labels core0".format(tmpdir)
        )
        self.launch(command_line)
        f = read("{}/samples/posterior_samples.h5".format(tmpdir))
        # prior samples are drawn by default
        assert len(f.priors["samples"]["core0"])

        command_line = (
            "summarycombine --webdir {0} --samples {0}/bilby.json "
            "--disable_prior_sampling --labels core0".format(tmpdir)
        )
        self.launch(command_line)
        f = read("{}/samples/posterior_samples.h5".format(tmpdir))
        assert not len(f.priors["samples"]["core0"])

    @pytest.mark.executabletest
    def test_external_hdf5_links(self):
        """Test that seperate hdf5 files are made when the
        `--external_hdf5_links` command line is passed
        """
        from pesummary.gw.file.read import read
        from .base import make_psd, make_calibration

        make_result_file(outdir=tmpdir, gw=True, extension="json")
        os.rename("{}/test.json".format(tmpdir), "{}/example.json".format(tmpdir))
        make_psd(outdir=tmpdir)
        make_calibration(outdir=tmpdir)
        command_line = (
            "summarycombine --webdir {0} --samples "
            "{0}/example.json --label gw0 --external_hdf5_links --gw "
            "--psd H1:{0}/psd.dat --calibration L1:{0}/calibration.dat "
            "--no_conversion".format(tmpdir)
        )
        self.launch(command_line)
        assert os.path.isfile(
            os.path.join(tmpdir, "samples", "posterior_samples.h5")
        )
        assert os.path.isfile(
            os.path.join(tmpdir, "samples", "_gw0.h5")
        )
        # the master file and the external per-label file must agree
        f = read("{}/samples/posterior_samples.h5".format(tmpdir))
        g = read("{}/example.json".format(tmpdir))
        h = read("{}/samples/_gw0.h5".format(tmpdir))
        np.testing.assert_almost_equal(f.samples[0], g.samples)
        np.testing.assert_almost_equal(f.samples[0], h.samples[0])
        np.testing.assert_almost_equal(f.psd["gw0"]["H1"], h.psd["gw0"]["H1"])
        np.testing.assert_almost_equal(
            f.priors["calibration"]["gw0"]["L1"],
            h.priors["calibration"]["gw0"]["L1"]
        )

    @pytest.mark.executabletest
    def test_compression(self):
        """Test that the metafile is reduced in size when the datasets are
        compressed with maximum compression level
        """
        from pesummary.gw.file.read import read
        from .base import make_psd, make_calibration

        make_result_file(outdir=tmpdir, gw=True, extension="json")
        os.rename("{}/test.json".format(tmpdir), "{}/example.json".format(tmpdir))
        make_psd(outdir=tmpdir)
        make_calibration(outdir=tmpdir)
        command_line = (
            "summarycombine --webdir {0} --samples "
            "{0}/example.json --label gw0 --no_conversion --gw "
            "--psd H1:{0}/psd.dat --calibration L1:{0}/calibration.dat ".format(
                tmpdir
            )
        )
        self.launch(command_line)
        original_size = os.stat("{}/samples/posterior_samples.h5".format(tmpdir)).st_size
        command_line = (
            "summarycombine --webdir {0} --samples "
            "{0}/example.json --label gw0 --no_conversion --gw "
            "--psd H1:{0}/psd.dat --calibration L1:{0}/calibration.dat "
            "--hdf5_compression 9 --posterior_samples_filename "
            "posterior_samples2.h5".format(tmpdir)
        )
        self.launch(command_line)
        compressed_size = os.stat("{}/samples/posterior_samples2.h5".format(tmpdir)).st_size
        assert compressed_size < original_size

        # compression must not change the stored samples
        f = read("{}/samples/posterior_samples.h5".format(tmpdir))
        g = read("{}/samples/posterior_samples2.h5".format(tmpdir))
        posterior_samples = f.samples[0]
        posterior_samples2 = g.samples[0]
        np.testing.assert_almost_equal(posterior_samples, posterior_samples2)

    @pytest.mark.executabletest
    def test_seed(self):
        """Test that the samples stored in the metafile are identical for two
        runs if the random seed is the same
        """
        from pesummary.gw.file.read import read

        make_result_file(outdir=tmpdir, gw=True, extension="json")
        os.rename("{}/test.json".format(tmpdir), "{}/example.json".format(tmpdir))
        command_line = (
            "summarycombine --webdir {0} --samples "
            "{0}/example.json --label gw0 --no_conversion --gw "
            "--nsamples 10 --seed 1000".format(tmpdir)
        )
        self.launch(command_line)
        original = read("{}/samples/posterior_samples.h5".format(tmpdir))
        command_line = (
            "summarycombine --webdir {0} --samples "
            "{0}/example.json --label gw0 --no_conversion --gw "
            "--nsamples 10 --seed 2000".format(tmpdir)
        )
        self.launch(command_line)
        new = read("{}/samples/posterior_samples.h5".format(tmpdir))
        # different seeds must downsample differently. The previous
        # try/raise/except pattern caught its own AssertionError and could
        # therefore never fail; pytest.raises asserts the mismatch properly
        with pytest.raises(AssertionError):
            np.testing.assert_almost_equal(
                original.samples[0], new.samples[0]
            )

        command_line = (
            "summarycombine --webdir {0} --samples "
            "{0}/example.json --label gw0 --no_conversion --gw "
            "--nsamples 10 --seed 1000".format(tmpdir)
        )
        self.launch(command_line)
        original = read("{}/samples/posterior_samples.h5".format(tmpdir))
        command_line = (
            "summarycombine --webdir {0} --samples "
            "{0}/example.json --label gw0 --no_conversion --gw "
            "--nsamples 10 --seed 1000".format(tmpdir)
        )
        self.launch(command_line)
        new = read("{}/samples/posterior_samples.h5".format(tmpdir))
        # identical seeds must reproduce the same downsampled set
        np.testing.assert_almost_equal(
            original.samples[0], new.samples[0]
        )

    @pytest.mark.executabletest
    def test_preferred(self):
        """Test that the preferred analysis is correctly stored in the metafile
        """
        from pesummary.io import read

        make_result_file(gw=True, extension="json", outdir=tmpdir)
        make_result_file(gw=True, extension="hdf5", outdir=tmpdir)
        command_line = (
            "summarycombine --webdir {0} --samples "
            "{0}/test.json {0}/test.h5 --label gw0 gw1 --no_conversion "
            "--gw --nsamples 10".format(tmpdir)
        )
        self.launch(command_line)
        f = read("{}/samples/posterior_samples.h5".format(tmpdir))
        # no --preferred flag means no preferred analysis
        assert f.preferred is None
        command_line = (
            "summarycombine --webdir {0} --samples "
            "{0}/test.json {0}/test.h5 --label gw0 gw1 --no_conversion "
            "--gw --nsamples 10 --preferred gw1".format(tmpdir)
        )
        self.launch(command_line)
        f = read("{}/samples/posterior_samples.h5".format(tmpdir))
        assert f.preferred == "gw1"
        command_line = (
            "summarycombine --webdir {0} --samples "
            "{0}/test.json {0}/test.h5 --label gw0 gw1 --no_conversion "
            "--gw --nsamples 10 --preferred gw2".format(tmpdir)
        )
        self.launch(command_line)
        f = read("{}/samples/posterior_samples.h5".format(tmpdir))
        # an unknown label is silently ignored
        assert f.preferred is None
class TestSummaryReview(Base):
    """Tests for the `summaryreview` executable
    """
    def setup_method(self):
        """Create a lalinference result file to review
        """
        if not os.path.isdir(tmpdir):
            os.mkdir(tmpdir)
        make_result_file(outdir=tmpdir, lalinference=True)

    def teardown_method(self):
        """Delete everything generated by these tests
        """
        if os.path.isdir(tmpdir):
            shutil.rmtree(tmpdir)

    @pytest.mark.executabletest
    def test_review(self):
        """Test the `summaryreview` script for a `lalinference` result file
        """
        self.launch(
            "summaryreview --webdir {0} --samples {0}/test.hdf5 "
            "--test core_plots".format(tmpdir)
        )
class TestSummarySplit(Base):
    """Test the `summarysplit` executable
    """
    def setup_method(self):
        """Setup the SummarySplit class
        """
        if not os.path.isdir(tmpdir):
            os.mkdir(tmpdir)
        make_result_file(outdir=tmpdir, gw=False, extension="json")
        make_result_file(outdir=tmpdir, gw=False, extension="hdf5", n_samples=500)

    def teardown_method(self):
        """Remove the files and directories created from this class
        """
        if os.path.isdir(tmpdir):
            shutil.rmtree(tmpdir)

    @pytest.mark.executabletest
    def test_split_single_analysis(self):
        """Test that a file containing a single analysis is successfully split
        into N_samples result files
        """
        from pesummary.io import read
        command_line = (
            f"summarysplit --samples {tmpdir}/test.json --file_format json "
            f"--outdir {tmpdir}/split"
        )
        self.launch(command_line)
        original = read(f"{tmpdir}/test.json").samples_dict
        files = glob.glob(f"{tmpdir}/split/*.json")
        assert len(files) == original.number_of_samples
        # unused `enumerate` counters removed: the sample index is encoded
        # in the filename, not in the glob order
        for f in files:
            g = read(f).samples_dict
            assert g.number_of_samples == 1
            idx = int(f.split("/")[-1].split("_")[-1].split(".")[0])
            for param in g.keys():
                assert g[param] == original[param][idx]
        command_line = (
            "summarycombine_posteriors --use_all --samples {} "
            f"--outdir {tmpdir} --filename combined_split.dat "
            "--file_format dat --labels {}"
        ).format(
            " ".join(files), " ".join(
                np.arange(original.number_of_samples).astype(str)
            )
        )
        self.launch(command_line)
        combined = read(f"{tmpdir}/combined_split.dat").samples_dict
        assert all(param in original.keys() for param in combined.keys())
        for param in original.keys():
            assert all(sample in combined[param] for sample in original[param])
            assert all(sample in original[param] for sample in combined[param])

    @pytest.mark.executabletest
    def test_split_single_analysis_specific_N_files(self):
        """Test that a file containing a single analysis is successfully split
        into 10 result files
        """
        from pesummary.io import read
        command_line = (
            f"summarysplit --samples {tmpdir}/test.json --file_format json "
            f"--outdir {tmpdir}/split --N_files 10"
        )
        self.launch(command_line)
        original = read(f"{tmpdir}/test.json").samples_dict
        files = glob.glob(f"{tmpdir}/split/*.json")
        assert len(files) == 10
        for f in files:
            g = read(f).samples_dict
            for param in g.keys():
                assert all(sample in original[param] for sample in g[param])

    @pytest.mark.executabletest
    def test_split_multi_analysis(self):
        """Test that a file containing multiple analyses is successfully split
        into N_samples result files
        """
        from pesummary.io import read
        command_line = (
            f"summarycombine --webdir {tmpdir} --samples {tmpdir}/test.json "
            f"{tmpdir}/test.h5 --labels one two"
        )
        self.launch(command_line)
        command_line = (
            f"summarysplit --samples {tmpdir}/samples/posterior_samples.h5 "
            f"--file_format hdf5 --outdir {tmpdir}/split"
        )
        self.launch(command_line)
        # each analysis is split into its own sub-directory
        assert os.path.isdir(f"{tmpdir}/split/one")
        assert os.path.isdir(f"{tmpdir}/split/two")
        zipped = zip(["one", "two"], [f"{tmpdir}/test.json", f"{tmpdir}/test.h5"])
        for analysis, f in zipped:
            original = read(f).samples_dict
            files = glob.glob(f"{tmpdir}/split/{analysis}/*.hdf5")
            assert len(files) == original.number_of_samples
            for g in files:
                h = read(g).samples_dict
                assert h.number_of_samples == 1
                idx = int(g.split("/")[-1].split("_")[-1].split(".")[0])
                for param in h.keys():
                    assert h[param] == original[param][idx]
class TestSummaryExtract(Base):
    """Tests for the `summaryextract` executable
    """
    def setup_method(self):
        """Create two result files to combine into a metafile
        """
        if not os.path.isdir(tmpdir):
            os.mkdir(tmpdir)
        make_result_file(outdir=tmpdir, gw=False, extension="json")
        os.rename(f"{tmpdir}/test.json", f"{tmpdir}/example.json")
        make_result_file(outdir=tmpdir, gw=False, extension="hdf5")
        os.rename(f"{tmpdir}/test.h5", f"{tmpdir}/example2.h5")

    def teardown_method(self):
        """Delete everything generated by these tests
        """
        if os.path.isdir(tmpdir):
            shutil.rmtree(tmpdir)

    @pytest.mark.executabletest
    def test_extract(self):
        """Test that a set of posterior samples are correctly extracted
        """
        from pesummary.io import read
        self.launch(
            f"summarycombine --samples {tmpdir}/example.json {tmpdir}/example2.h5 "
            f"--labels one two --webdir {tmpdir}"
        )
        self.launch(
            f"summaryextract --outdir {tmpdir} --filename one.dat --file_format dat "
            f"--samples {tmpdir}/samples/posterior_samples.h5 --label one"
        )
        assert os.path.isfile(f"{tmpdir}/one.dat")
        pulled = read(f"{tmpdir}/one.dat").samples_dict
        reference = read(f"{tmpdir}/example.json").samples_dict
        assert all(param in pulled.keys() for param in reference.keys())
        np.testing.assert_almost_equal(pulled.samples, reference.samples)
        self.launch(
            f"summaryextract --outdir {tmpdir} --filename one.h5 --label one "
            "--file_format pesummary "
            f"--samples {tmpdir}/samples/posterior_samples.h5 "
        )
        assert os.path.isfile(f"{tmpdir}/one.h5")
        # a pesummary-format extraction stores the analysis under 'dataset'
        pulled = read(f"{tmpdir}/one.h5").samples_dict
        assert "dataset" in pulled.keys()
        assert all(param in pulled["dataset"].keys() for param in reference.keys())
        np.testing.assert_almost_equal(pulled["dataset"].samples, reference.samples)
class TestSummaryCombine_Posteriors(Base):
    """Tests for the `summarycombine_posteriors` executable
    """
    def setup_method(self):
        """Create three gw result files with different extensions
        """
        if not os.path.isdir(tmpdir):
            os.mkdir(tmpdir)
        make_result_file(outdir=tmpdir, gw=True, extension="json")
        os.rename(f"{tmpdir}/test.json", f"{tmpdir}/example.json")
        make_result_file(outdir=tmpdir, gw=True, extension="hdf5")
        os.rename(f"{tmpdir}/test.h5", f"{tmpdir}/example2.h5")
        make_result_file(outdir=tmpdir, gw=True, extension="dat")
        os.rename(f"{tmpdir}/test.dat", f"{tmpdir}/example3.dat")

    def teardown_method(self):
        """Delete everything generated by these tests
        """
        if os.path.isdir(tmpdir):
            shutil.rmtree(tmpdir)

    @pytest.mark.executabletest
    def test_combine(self):
        """Test that two posteriors are combined with equal weights
        """
        from pesummary.io import read
        self.launch(
            f"summarycombine_posteriors --outdir {tmpdir} --filename test.dat "
            f"--file_format dat --samples {tmpdir}/example.json {tmpdir}/example2.h5 "
            "--labels one two --weights 0.5 0.5 --seed 12345"
        )
        assert os.path.isfile(f"{tmpdir}/test.dat")
        mixed = read(f"{tmpdir}/test.dat").samples_dict
        first = read(f"{tmpdir}/example.json").samples_dict
        second = read(f"{tmpdir}/example2.h5").samples_dict
        # with 0.5/0.5 weights, the first half of the combined samples comes
        # from analysis 'one' and the second half from 'two'
        midpoint = int(mixed.number_of_samples / 2.)
        for param in mixed.keys():
            assert all(ss in first[param] for ss in mixed[param][:midpoint])
            assert all(ss in second[param] for ss in mixed[param][midpoint:])

    @pytest.mark.executabletest
    def test_combine_metafile_failures(self):
        """Test that errors are raised when incorrect labels are passed when
        trying to combine posteriors from a single metafile and when trying
        to combine posteriors from multiple metafiles
        """
        self.launch(
            f"summarycombine --samples {tmpdir}/example.json {tmpdir}/example2.h5 "
            f"{tmpdir}/example3.dat --labels one two three --webdir {tmpdir} "
            "--no_conversion"
        )
        # label 'four' does not exist in the metafile
        with pytest.raises(Exception):
            self.launch(
                f"summarycombine_posteriors --outdir {tmpdir} --filename test.dat "
                f"--file_format dat --samples {tmpdir}/samples/posterior_samples.h5 "
                "--labels one four --weights 0.5 0.5 --seed 12345"
            )
        # more than one metafile is not allowed
        with pytest.raises(Exception):
            self.launch(
                f"summarycombine_posteriors --outdir {tmpdir} --filename test.dat "
                f"--file_format dat --samples {tmpdir}/samples/posterior_samples.h5 "
                f"{tmpdir}/samples/posterior_samples.h5 --labels one two "
                "--weights 0.5 0.5 --seed 12345"
            )
        # a metafile cannot be mixed with an ordinary result file
        with pytest.raises(Exception):
            self.launch(
                f"summarycombine_posteriors --outdir {tmpdir} --filename test.dat "
                f"--file_format dat --samples {tmpdir}/samples/posterior_samples.h5 "
                f"{tmpdir}/example3.dat --labels one two --weights 0.5 0.5 --seed 12345"
            )

    @pytest.mark.executabletest
    def test_combine_metafile(self):
        """Test that two posteriors are combined when a single metafile
        is provided
        """
        from pesummary.io import read
        self.launch(
            f"summarycombine --samples {tmpdir}/example.json {tmpdir}/example2.h5 "
            f"{tmpdir}/example3.dat --labels one two three --webdir {tmpdir} "
            "--no_conversion"
        )
        self.launch(
            f"summarycombine_posteriors --outdir {tmpdir} --filename test.dat "
            f"--file_format dat --samples {tmpdir}/samples/posterior_samples.h5 "
            "--labels one two --weights 0.5 0.5 --seed 12345"
        )
        assert os.path.isfile(f"{tmpdir}/test.dat")
        mixed = read(f"{tmpdir}/test.dat").samples_dict
        first = read(f"{tmpdir}/example.json").samples_dict
        second = read(f"{tmpdir}/example2.h5").samples_dict
        midpoint = int(mixed.number_of_samples / 2.)
        for param in mixed.keys():
            assert all(ss in first[param] for ss in mixed[param][:midpoint])
            assert all(ss in second[param] for ss in mixed[param][midpoint:])

        # test that you add the samples to the original file
        self.launch(
            f"summarycombine_posteriors --outdir {tmpdir} --filename test.h5 "
            f"--file_format dat --samples {tmpdir}/samples/posterior_samples.h5 "
            "--labels one two --weights 0.5 0.5 --seed 12345 --add_to_existing"
        )
        assert os.path.isfile(f"{tmpdir}/test.h5")
        augmented = read(f"{tmpdir}/test.h5")
        augmented_samples = augmented.samples_dict
        assert "one_two_combined" in augmented.labels
        assert "one_two_combined" in augmented_samples.keys()
        augmented_samples = augmented_samples["one_two_combined"]
        for param in augmented_samples.keys():
            assert all(ss in first[param] for ss in augmented_samples[param][:midpoint])
            assert all(ss in second[param] for ss in augmented_samples[param][midpoint:])
        # check that summarypages works fine on output
        self.launch(
            f"summarypages --webdir {tmpdir}/combined "
            f" --no_conversion --samples {tmpdir}/test.h5 "
            "--disable_corner --disable_interactive --gw"
        )
        assert os.path.isfile(f"{tmpdir}/combined/samples/posterior_samples.h5")
        output = read(f"{tmpdir}/combined/samples/posterior_samples.h5")
        assert "one_two_combined" in output.labels
1738class TestSummaryModify(Base):
1739 """Test the `summarymodify` executable
1740 """
1741 def setup_method(self):
1742 """Setup the SummaryModify class
1743 """
1744 if not os.path.isdir(tmpdir):
1745 os.mkdir(tmpdir)
1746 make_result_file(
1747 pesummary=True, pesummary_label="replace", extension="hdf5",
1748 outdir=tmpdir
1749 )
1751 def teardown_method(self):
1752 """Remove the files and directories created from this class
1753 """
1754 if os.path.isdir(tmpdir):
1755 shutil.rmtree(tmpdir)
1757 @pytest.mark.executabletest
1758 def test_preferred(self):
1759 """Test that the preferred run is correctly specified in the meta file
1760 """
1761 from pesummary.io import read
1762 make_result_file(extension="json", bilby=True, gw=True, outdir=tmpdir)
1763 make_result_file(extension="dat", gw=True, outdir=tmpdir)
1764 command_line = (
1765 "summarycombine --webdir {0} --samples {0}/test.json "
1766 "{0}/test.dat --no_conversion --gw --labels one two "
1767 "--nsamples 100".format(
1768 tmpdir
1769 )
1770 )
1771 self.launch(command_line)
1772 f = read("{}/samples/posterior_samples.h5".format(tmpdir))
1773 assert f.preferred is None
1774 command_line = (
1775 "summarymodify --samples {0}/samples/posterior_samples.h5 "
1776 "--webdir {0} --preferred two".format(tmpdir)
1777 )
1778 self.launch(command_line)
1779 f = read("{0}/modified_posterior_samples.h5".format(tmpdir))
1780 assert f.preferred == "two"
1782 @pytest.mark.executabletest
1783 def test_descriptions(self):
1784 """Test that the descriptions are correctly replaced in the meta file
1785 """
1786 import json
1787 import h5py
1789 command_line = (
1790 'summarymodify --webdir {0} --samples {0}/test.h5 '
1791 '--descriptions replace:TestingSummarymodify'.format(tmpdir)
1792 )
1793 self.launch(command_line)
1794 modified_data = h5py.File(
1795 "{}/modified_posterior_samples.h5".format(tmpdir), "r"
1796 )
1797 original_data = h5py.File("{}/test.h5".format(tmpdir), "r")
1798 data = h5py.File("{}/test.h5".format(tmpdir), "r")
1799 if "description" in original_data["replace"].keys():
1800 assert original_data["replace"]["description"][0] != b'TestingSummarymodify'
1801 assert modified_data["replace"]["description"][0] == b'TestingSummarymodify'
1802 modified_data.close()
1803 original_data.close()
1805 with open("{}/descriptions.json".format(tmpdir), "w") as f:
1806 json.dump({"replace": "NewDescription"}, f)
1808 command_line = (
1809 'summarymodify --webdir {0} --samples {0}/test.h5 '
1810 '--descriptions {0}/descriptions.json'.format(tmpdir)
1811 )
1812 self.launch(command_line)
1813 modified_data = h5py.File(
1814 "{}/modified_posterior_samples.h5".format(tmpdir), "r"
1815 )
1816 assert modified_data["replace"]["description"][0] == b'NewDescription'
1817 modified_data.close()
    @pytest.mark.executabletest
    def test_modify_config(self):
        """Test that the config file is correctly replaced in the meta file
        """
        import configparser
        import h5py
        user = getuser()
        config = configparser.ConfigParser()
        # keep option names case-sensitive so keys round-trip unchanged
        config.optionxform = str
        config.read(data_dir + "/config_lalinference.ini")
        # NOTE: dict(config._sections) is a *shallow* copy, so mutating
        # config_dictionary["paths"] below also mutates `config` itself
        config_dictionary = dict(config._sections)
        config_dictionary["paths"]["webdir"] = (
            "./{}/webdir".format(user)
        )
        # store the (user-specific) config inside a fresh metafile
        make_result_file(
            pesummary=True, pesummary_label="replace", extension="hdf5",
            config=config_dictionary, outdir=tmpdir
        )
        # confirm the original webdir was written to the metafile
        f = h5py.File("{}/test.h5".format(tmpdir), "r")
        assert f["replace"]["config_file"]["paths"]["webdir"][0] == (
            bytes("./{}/webdir".format(user), "utf-8")
        )
        f.close()
        # re-read the pristine config and write a replacement ini with a
        # different webdir (the shallow-copy mutation above applies here too)
        config.read(data_dir + "/config_lalinference.ini")
        config_dictionary = dict(config._sections)
        config_dictionary["paths"]["webdir"] = "./replace/webdir"
        with open('{}/replace_config.ini'.format(tmpdir), 'w') as configfile:
            config.write(configfile)
        command_line = (
            "summarymodify --webdir {0} --samples {0}/test.h5 "
            "--config replace:{0}/replace_config.ini".format(tmpdir)
        )
        self.launch(command_line)
        # the modified metafile must contain the replacement webdir only
        f = h5py.File("{}/modified_posterior_samples.h5".format(tmpdir), "r")
        assert f["replace"]["config_file"]["paths"]["webdir"][0] != (
            bytes("./{}/webdir".format(user), "utf-8")
        )
        assert f["replace"]["config_file"]["paths"]["webdir"][0] == (
            bytes("./replace/webdir", "utf-8")
        )
        f.close()
1861 @pytest.mark.executabletest
1862 def test_modify_kwargs_replace(self):
1863 """Test that kwargs are correctly replaced in the meta file
1864 """
1865 import h5py
1867 command_line = (
1868 "summarymodify --webdir {0} --samples {0}/test.h5 "
1869 "--delimiter / --kwargs replace/log_evidence:1000".format(
1870 tmpdir
1871 )
1872 )
1873 self.launch(command_line)
1874 modified_data = h5py.File(
1875 "{}/modified_posterior_samples.h5".format(tmpdir), "r"
1876 )
1877 original_data = h5py.File("{}/test.h5".format(tmpdir), "r")
1878 data = h5py.File("{}/test.h5".format(tmpdir), "r")
1879 assert original_data["replace"]["meta_data"]["sampler"]["log_evidence"][0] != b'1000'
1880 assert modified_data["replace"]["meta_data"]["sampler"]["log_evidence"][0] == b'1000'
1881 modified_data.close()
1882 original_data.close()
1884 @pytest.mark.executabletest
1885 def test_modify_kwargs_append(self):
1886 """Test that kwargs are correctly added to the result file
1887 """
1888 import h5py
1890 original_data = h5py.File("{}/test.h5".format(tmpdir), "r")
1891 assert "other" not in original_data["replace"]["meta_data"].keys()
1892 original_data.close()
1893 command_line = (
1894 "summarymodify --webdir {0} --samples {0}/test.h5 "
1895 "--delimiter / --kwargs replace/test:10 "
1896 "--overwrite".format(tmpdir)
1897 )
1898 self.launch(command_line)
1899 modified_data = h5py.File("{}/test.h5".format(tmpdir), "r")
1900 assert modified_data["replace"]["meta_data"]["other"]["test"][0] == b'10'
1901 modified_data.close()
1903 @pytest.mark.executabletest
1904 def test_modify_posterior(self):
1905 """Test that a posterior distribution is correctly modified
1906 """
1907 import h5py
1909 new_posterior = np.random.uniform(10, 0.5, 1000)
1910 np.savetxt("{}/different_posterior.dat".format(tmpdir), new_posterior)
1911 command_line = (
1912 "summarymodify --webdir {0} --samples {0}/test.h5 --delimiter ; "
1913 "--replace_posterior replace;mass_1:{0}/different_posterior.dat".format(
1914 tmpdir
1915 )
1916 )
1917 self.launch(command_line)
1918 modified_data = h5py.File(
1919 "{}/modified_posterior_samples.h5".format(tmpdir), "r"
1920 )
1921 np.testing.assert_almost_equal(
1922 modified_data["replace"]["posterior_samples"]["mass_1"], new_posterior
1923 )
1924 modified_data.close()
1925 command_line = (
1926 "summarymodify --webdir {0} --samples {0}/test.h5 --delimiter ; "
1927 "--replace_posterior replace;abc:{0}/different_posterior.dat".format(
1928 tmpdir
1929 )
1930 )
1931 self.launch(command_line)
1932 modified_data = h5py.File(
1933 "{}/modified_posterior_samples.h5".format(tmpdir), "r"
1934 )
1935 np.testing.assert_almost_equal(
1936 modified_data["replace"]["posterior_samples"]["abc"], new_posterior
1937 )
1938 modified_data.close()
1940 @pytest.mark.executabletest
1941 def test_remove_label(self):
1942 """Test that an analysis is correctly removed
1943 """
1944 from pesummary.io import read
1945 make_result_file(gw=True, extension="json", outdir=tmpdir)
1946 os.rename(
1947 "{}/test.json".format(tmpdir), "{}/example.json".format(tmpdir)
1948 )
1949 make_result_file(gw=True, extension="hdf5", outdir=tmpdir)
1950 os.rename(
1951 "{}/test.h5".format(tmpdir), "{}/example2.h5".format(tmpdir)
1952 )
1953 make_result_file(gw=True, extension="dat", outdir=tmpdir)
1954 os.rename(
1955 "{}/test.dat".format(tmpdir), "{}/example3.dat".format(tmpdir)
1956 )
1957 command_line = (
1958 "summarycombine --samples {0}/example.json {0}/example2.h5 "
1959 "{0}/example3.dat --labels one two three --webdir {0} "
1960 "--no_conversion".format(tmpdir)
1961 )
1962 self.launch(command_line)
1963 original = read("{}/samples/posterior_samples.h5".format(tmpdir))
1964 assert all(label in original.labels for label in ["one", "two", "three"])
1965 command_line = (
1966 "summarymodify --samples {0}/samples/posterior_samples.h5 "
1967 "--remove_label one --webdir {0}".format(tmpdir)
1968 )
1969 self.launch(command_line)
1970 f = read("{}/modified_posterior_samples.h5".format(tmpdir))
1971 assert "one" not in f.labels
1972 assert all(label in f.labels for label in ["two", "three"])
1973 _original_samples = original.samples_dict
1974 _samples = f.samples_dict
1975 for label in ["two", "three"]:
1976 np.testing.assert_almost_equal(
1977 _original_samples[label].samples, _samples[label].samples
1978 )
1979 command_line = (
1980 "summarymodify --samples {0}/samples/posterior_samples.h5 "
1981 "--remove_label example --webdir {0}".format(tmpdir)
1982 )
1983 f = read("{}/modified_posterior_samples.h5".format(tmpdir))
1984 assert "one" not in f.labels
1985 assert all(label in f.labels for label in ["two", "three"])
1987 @pytest.mark.executabletest
1988 def test_remove_posterior(self):
1989 """Test that a posterior is correctly removed
1990 """
1991 import h5py
1993 command_line = (
1994 "summarymodify --webdir {0} --samples {0}/test.h5 --delimiter ; "
1995 "--remove_posterior replace;mass_1".format(tmpdir)
1996 )
1997 self.launch(command_line)
1998 original_data = h5py.File("{}/test.h5".format(tmpdir), "r")
1999 params = list(original_data["replace"]["posterior_samples"]["parameter_names"])
2000 if isinstance(params[0], bytes):
2001 params = [param.decode("utf-8") for param in params]
2002 assert "mass_1" in params
2003 modified_data = h5py.File(
2004 "{}/modified_posterior_samples.h5".format(tmpdir), "r"
2005 )
2006 assert "mass_1" not in modified_data["replace"]["posterior_samples"].dtype.names
2007 original_data.close()
2008 modified_data.close()
2010 @pytest.mark.executabletest
2011 def test_remove_multiple_posteriors(self):
2012 """Test that multiple posteriors are correctly removed
2013 """
2014 import h5py
2016 command_line = (
2017 "summarymodify --webdir {0} --samples {0}/test.h5 --delimiter ; "
2018 "--remove_posterior replace;mass_1 replace;mass_2".format(
2019 tmpdir
2020 )
2021 )
2022 self.launch(command_line)
2023 original_data = h5py.File("{}/test.h5".format(tmpdir), "r")
2024 params = list(original_data["replace"]["posterior_samples"]["parameter_names"])
2025 if isinstance(params[0], bytes):
2026 params = [param.decode("utf-8") for param in params]
2027 assert "mass_1" in params
2028 assert "mass_2" in params
2029 modified_data = h5py.File(
2030 "{}/modified_posterior_samples.h5".format(tmpdir), "r"
2031 )
2032 assert "mass_1" not in modified_data["replace"]["posterior_samples"].dtype.names
2033 assert "mass_2" not in modified_data["replace"]["posterior_samples"].dtype.names
2034 original_data.close()
2035 modified_data.close()
2037 @pytest.mark.executabletest
2038 def test_store_skymap(self):
2039 """Test that multiple skymaps are correctly stored
2040 """
2041 import astropy_healpix as ah
2042 from ligo.skymap.io.fits import write_sky_map
2043 import h5py
2045 nside = 128
2046 npix = ah.nside_to_npix(nside)
2047 prob = np.random.random(npix)
2048 prob /= sum(prob)
2050 write_sky_map(
2051 '{}/test.fits'.format(tmpdir), prob,
2052 objid='FOOBAR 12345',
2053 gps_time=10494.3,
2054 creator="test",
2055 origin='LIGO Scientific Collaboration',
2056 )
2057 command_line = (
2058 "summarymodify --webdir {0} --samples {0}/test.h5 "
2059 "--store_skymap replace:{0}/test.fits".format(tmpdir)
2060 )
2061 self.launch(command_line)
2062 modified_data = h5py.File(
2063 "{}/modified_posterior_samples.h5".format(tmpdir), "r"
2064 )
2065 assert "skymap" in modified_data["replace"].keys()
2066 np.testing.assert_almost_equal(
2067 modified_data["replace"]["skymap"]["data"], prob
2068 )
2069 np.testing.assert_almost_equal(
2070 modified_data["replace"]["skymap"]["meta_data"]["gps_time"][0], 10494.3
2071 )
2072 _creator = modified_data["replace"]["skymap"]["meta_data"]["creator"][0]
2073 if isinstance(_creator, bytes):
2074 _creator = _creator.decode("utf-8")
2075 assert _creator == "test"
2077 command_line = (
2078 "summarymodify --webdir {0} "
2079 "--samples {0}/modified_posterior_samples.h5 "
2080 "--store_skymap replace:{0}/test.fits --force_replace".format(
2081 tmpdir
2082 )
2083 )
2084 self.launch(command_line)
2085 command_line = (
2086 "summarypages --webdir {0}/webpage --gw --no_conversion "
2087 "--samples {0}/modified_posterior_samples.h5 ".format(tmpdir)
2088 )
2089 self.launch(command_line)
2090 data = h5py.File(
2091 "{}/webpage/samples/posterior_samples.h5".format(tmpdir), "r"
2092 )
2093 np.testing.assert_almost_equal(data["replace"]["skymap"]["data"], prob)
2094 data.close()
2095 with pytest.raises(ValueError):
2096 command_line = (
2097 "summarymodify --webdir {0} "
2098 "--samples {0}/modified_posterior_samples.h5 "
2099 "--store_skymap replace:{0}/test.fits".format(tmpdir)
2100 )
2101 self.launch(command_line)
2103 @pytest.mark.executabletest
2104 def test_modify(self):
2105 """Test the `summarymodify` script
2106 """
2107 import h5py
2109 command_line = (
2110 "summarymodify --webdir {0} --samples {0}/test.h5 "
2111 "--labels replace:new".format(tmpdir)
2112 )
2113 self.launch(command_line)
2114 modified_data = h5py.File(
2115 "{}/modified_posterior_samples.h5".format(tmpdir), "r"
2116 )
2117 data = h5py.File("{}/test.h5".format(tmpdir), "r")
2118 assert "replace" not in list(modified_data.keys())
2119 assert "new" in list(modified_data.keys())
2120 for key in data["replace"].keys():
2121 assert key in modified_data["new"].keys()
2122 for i, j in zip(data["replace"][key], modified_data["new"][key]):
2123 try:
2124 if isinstance(data["replace"][key][i],h5py._hl.dataset.Dataset):
2125 try:
2126 assert all(k == l for k, l in zip(
2127 data["replace"][key][i],
2128 modified_data["new"][key][j]
2129 ))
2130 except ValueError:
2131 assert all(
2132 all(m == n for m, n in zip(k, l)) for k, l in zip(
2133 data["replace"][key][i],
2134 modified_data["new"][key][j]
2135 )
2136 )
2137 except TypeError:
2138 pass
2139 data.close()
2140 modified_data.close()
class TestSummaryRecreate(Base):
    """Test the `summaryrecreate` executable
    """
    def setup_method(self):
        """Setup the SummaryRecreate class

        Writes a pesummary metafile containing a lalinference config and a
        dummy GW150914.txt trigger-time file used by summaryrecreate.
        """
        import configparser

        if not os.path.isdir(tmpdir):
            os.mkdir(tmpdir)
        config = configparser.ConfigParser()
        # keep option names case-sensitive, matching the original config
        config.optionxform = str
        config.read(data_dir + "/config_lalinference.ini")
        config_dictionary = dict(config._sections)
        config_dictionary["paths"]["webdir"] = (
            "./{}/webdir".format(getuser())
        )
        make_result_file(
            pesummary=True, pesummary_label="recreate", extension="hdf5",
            config=config_dictionary, outdir=tmpdir
        )
        with open("GW150914.txt", "w") as f:
            f.writelines(["115"])

    def teardown_method(self):
        """Remove the files and directories created from this class

        BUG FIX: previously only `tmpdir` was removed, leaking the
        '{tmpdir}_modify' run directory created by test_recreate and the
        GW150914.txt file written into the working directory by setup.
        """
        for directory in [tmpdir, "{}_modify".format(tmpdir)]:
            if os.path.isdir(directory):
                shutil.rmtree(directory)
        if os.path.isfile("GW150914.txt"):
            os.remove("GW150914.txt")

    @pytest.mark.executabletest
    def test_recreate(self):
        """Test the `summaryrecreate` script

        Runs summaryrecreate twice: once verbatim, once with
        --config_override, and checks the regenerated config files.
        """
        import configparser

        command_line = (
            "summaryrecreate --rundir {0} --samples {0}/test.h5 ".format(
                tmpdir
            )
        )
        self.launch(command_line)
        # the expected run-directory layout must have been created
        assert os.path.isdir(os.path.join(tmpdir, "recreate"))
        assert os.path.isfile(os.path.join(tmpdir, "recreate", "config.ini"))
        assert os.path.isdir(os.path.join(tmpdir, "recreate", "outdir"))
        assert os.path.isdir(os.path.join(tmpdir, "recreate", "outdir", "caches"))
        config = configparser.ConfigParser()
        config.read(os.path.join(tmpdir, "recreate", "config.ini"))
        original_config = configparser.ConfigParser()
        original_config.read(data_dir + "/config_lalinference.ini")
        # without overrides the regenerated config matches the original
        for a, b in zip(
            sorted(config.sections()), sorted(original_config.sections())
        ):
            assert a == b
            for key, item in config[a].items():
                assert config[b][key] == item
        command_line = (
            "summaryrecreate --rundir {0}_modify --samples {0}/test.h5 "
            "--config_override approx:IMRPhenomPv3HM srate:4096".format(
                tmpdir
            )
        )
        self.launch(command_line)
        config = configparser.ConfigParser()
        config.read(os.path.join("{}_modify".format(tmpdir), "recreate", "config.ini"))
        original_config = configparser.ConfigParser()
        original_config.read(data_dir + "/config_lalinference.ini")
        # only the overridden keys (and webdir) may differ from the original
        for a, b in zip(
            sorted(config.sections()), sorted(original_config.sections())
        ):
            assert a == b
            for key, item in config[a].items():
                if key == "approx":
                    assert original_config[b][key] != item
                    assert config[b][key] == "IMRPhenomPv3HM"
                elif key == "srate":
                    assert original_config[b][key] != item
                    assert config[b][key] == "4096"
                elif key == "webdir":
                    pass
                else:
                    assert original_config[b][key] == item
class TestSummaryCompare(Base):
    """Test the SummaryCompare executable
    """
    def setup_method(self):
        """Setup the SummaryCompare class
        """
        if not os.path.isdir(tmpdir):
            os.mkdir(tmpdir)

    def teardown_method(self):
        """Remove the files and directories created from this class
        """
        if os.path.isdir(tmpdir):
            shutil.rmtree(tmpdir)

    @pytest.mark.executabletest
    def test_example_in_docs(self):
        """Test that the code runs for the example in the docs
        """
        import numpy as np
        from pesummary.io import write

        # two small result files with overlapping parameter sets, in two
        # different file formats
        first_parameters = ["a", "b", "c", "d"]
        first_samples = np.random.random([100, 4])
        write(
            first_parameters, first_samples, file_format="dat", outdir=tmpdir,
            filename="example1.dat"
        )
        second_parameters = ["a", "b", "c", "d", "e"]
        second_samples = np.random.random([100, 5])
        write(
            second_parameters, second_samples, file_format="json", outdir=tmpdir,
            filename="example2.json"
        )
        self.launch(
            "summarycompare --samples {0}/example1.dat "
            "{0}/example2.json --properties_to_compare posterior_samples "
            "-v --generate_comparison_page --webdir {0}".format(
                tmpdir
            )
        )
class TestSummaryJSCompare(Base):
    """Test the `summaryjscompare` executable
    """
    def setup_method(self):
        """Setup the SummaryJSCompare class
        """
        self.dirs = [tmpdir]
        for directory in self.dirs:
            if not os.path.isdir(directory):
                os.mkdir(directory)

    def teardown_method(self):
        """Remove the files and directories created from this class
        """
        for directory in self.dirs:
            if os.path.isdir(directory):
                shutil.rmtree(directory)

    @pytest.mark.executabletest
    def test_runs_on_core_file(self):
        """Test that the code successfully generates a plot for 2 core result files
        """
        # two bilby core result files renamed to distinct inputs
        make_result_file(outdir=tmpdir, bilby=True, gw=False)
        os.rename("{}/test.json".format(tmpdir), "{}/bilby.json".format(tmpdir))
        make_result_file(outdir=tmpdir, bilby=True, gw=False)
        os.rename("{}/test.json".format(tmpdir), "{}/bilby2.json".format(tmpdir))
        self.launch(
            "summaryjscompare --event test-bilby1-bilby2 --main_keys a b c d "
            "--webdir {0} --samples {0}/bilby.json "
            "{0}/bilby2.json --labels bilby1 bilby2".format(tmpdir)
        )

    @pytest.mark.executabletest
    def test_runs_on_gw_file(self):
        """Test that the code successfully generates a plot for 2 gw result files
        """
        # one bilby GW file and one lalinference file
        make_result_file(outdir=tmpdir, bilby=True, gw=True)
        os.rename("{}/test.json".format(tmpdir), "{}/bilby.json".format(tmpdir))
        make_result_file(outdir=tmpdir, lalinference=True)
        os.rename("{}/test.hdf5".format(tmpdir), "{}/lalinference.hdf5".format(tmpdir))
        self.launch(
            "summaryjscompare --event test-bilby-lalinf --main_keys mass_1 "
            "mass_2 a_1 a_2 --webdir {0} --samples {0}/bilby.json "
            "{0}/lalinference.hdf5 --labels bilby lalinf".format(
                tmpdir
            )
        )