Coverage for pesummary/gw/file/formats/default.py: 87.8%

# Licensed under an MIT style license -- see LICENSE.md

import numpy as np
from pesummary.gw.file.formats.base_read import (
    GWRead, GWSingleAnalysisRead, GWMultiAnalysisRead
)
from pesummary.core.file.formats.default import Default as CoreDefault

__author__ = ["Charlie Hoy <charlie.hoy@ligo.org>"]


class SingleAnalysisDefault(GWSingleAnalysisRead):
    """Class to handle result files which only contain a single analysis

    Parameters
    ----------
    path_to_results_file: str
        path to the results file you wish to load
    remove_nan_likelihood_samples: bool, optional
        if True, remove samples which have log_likelihood='nan'. Default True

    Attributes
    ----------
    parameters: list
        list of parameters stored in the result file
    converted_parameters: list
        list of parameters that have been derived from the sampled distributions
    samples: 2d list
        list of samples stored in the result file
    samples_dict: dict
        dictionary of samples stored in the result file keyed by parameters
    input_version: str
        version of the result file passed
    extra_kwargs: dict
        dictionary of kwargs that were extracted from the result file
    pe_algorithm: str
        name of the algorithm used to generate the posterior samples

    Methods
    -------
    to_dat:
        save the posterior samples to a .dat file
    to_latex_table:
        convert the posterior samples to a latex table
    generate_latex_macros:
        generate a set of latex macros for the stored posterior samples
    to_lalinference:
        convert the posterior samples to a lalinference result file
    generate_all_posterior_samples:
        generate all posterior distributions that may be derived from
        sampled distributions
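
    Examples
    --------
    A minimal usage sketch (the file name below is illustrative only);
    single-analysis result files are normally loaded through the ``Default``
    class defined in this module, which wraps the data in this class:

    >>> from pesummary.gw.file.formats.default import Default
    >>> f = Default("posterior_samples.dat")  # doctest: +SKIP
    >>> posterior = f.samples_dict  # doctest: +SKIP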

54 """ 

55 def __init__(self, *args, _data=None, **kwargs): 

56 super(SingleAnalysisDefault, self).__init__(*args, **kwargs) 

57 if _data is not None: 

58 self.load(None, _data=_data, **kwargs) 


class MultiAnalysisDefault(GWMultiAnalysisRead):
    """Class to handle result files which contain multiple analyses

    Parameters
    ----------
    path_to_results_file: str
        path to the results file you wish to load
    remove_nan_likelihood_samples: bool, optional
        if True, remove samples which have log_likelihood='nan'. Default True

    Attributes
    ----------
    parameters: 2d list
        list of parameters stored in the result file for each analysis
    converted_parameters: 2d list
        list of parameters that have been derived from the sampled distributions
    samples: 3d list
        list of samples stored in the result file for each analysis
    samples_dict: dict
        dictionary of samples stored in the result file keyed by analysis label
    input_version: str
        version of the result file passed
    extra_kwargs: dict
        dictionary of kwargs that were extracted from the result file

    Methods
    -------
    samples_dict_for_label: dict
        dictionary of samples for a specific analysis
    reduced_samples_dict: dict
        dictionary of samples for one or more analyses
    to_dat:
        save the posterior samples to a .dat file
    to_latex_table:
        convert the posterior samples to a latex table
    generate_latex_macros:
        generate a set of latex macros for the stored posterior samples
    to_lalinference:
        convert the posterior samples to a lalinference result file
    generate_all_posterior_samples:
        generate all posterior distributions that may be derived from
        sampled distributions
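
    Examples
    --------
    A minimal usage sketch (the file name and analysis label are illustrative
    only); multi-analysis result files are normally loaded through the
    ``Default`` class defined in this module, which wraps the data in this
    class, and the posterior for a given analysis can then be accessed by
    label:

    >>> from pesummary.gw.file.formats.default import Default
    >>> f = Default("multi_analysis.hdf5")  # doctest: +SKIP
    >>> posterior = f.samples_dict["label"]  # doctest: +SKIP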

    """
    def __init__(self, *args, _data=None, **kwargs):
        super(MultiAnalysisDefault, self).__init__(*args, **kwargs)
        if _data is not None:
            self.load(None, _data=_data, **kwargs)


class Default(CoreDefault):
    """Class to handle the default loading options.

    Parameters
    ----------
    path_to_results_file: str
        path to the results file you wish to load
    remove_nan_likelihood_samples: bool, optional
        if True, remove samples which have log_likelihood='nan'. Default True

    Attributes
    ----------
    parameters: list
        list of parameters stored in the result file
    converted_parameters: list
        list of parameters that have been derived from the sampled distributions
    samples: 2d list
        list of samples stored in the result file
    samples_dict: dict
        dictionary of samples stored in the result file keyed by parameters
    input_version: str
        version of the result file passed
    extra_kwargs: dict
        dictionary of kwargs that were extracted from the result file

    Methods
    -------
    to_dat:
        save the posterior samples to a .dat file
    to_latex_table:
        convert the posterior samples to a latex table
    generate_latex_macros:
        generate a set of latex macros for the stored posterior samples
    to_lalinference:
        convert the posterior samples to a lalinference result file
    generate_all_posterior_samples:
        generate all posterior distributions that may be derived from
        sampled distributions
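
    Examples
    --------
    A minimal usage sketch (file name illustrative only). ``Default`` picks
    the appropriate ``_grab_data_from_*`` loader for the file and returns a
    ``SingleAnalysisDefault`` or ``MultiAnalysisDefault`` object:

    >>> from pesummary.gw.file.formats.default import Default
    >>> f = Default("posterior_samples.json")  # doctest: +SKIP
    >>> f.parameters  # doctest: +SKIP
    >>> f.samples_dict  # doctest: +SKIP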

    """
    def load_map(self):
        # extend the core load map with the gw-specific xml loader
        _load_map = super(Default, self).load_map(self)
        _load_map.update({
            "xml": self._grab_data_from_xml_file
        })
        return _load_map

    def __new__(self, path_to_results_file, **kwargs):
        # defer to the core reader, but ask it to wrap the loaded data in the
        # gw-specific single/multi analysis classes defined above
        data = super(Default, self).__new__(
            self, path_to_results_file, _single_default=SingleAnalysisDefault,
            _multi_default=MultiAnalysisDefault, **kwargs
        )
        self.module = "gw"
        return data

    @staticmethod
    def grab_extra_kwargs(parameters, samples):
        """Grab any additional information stored in the file

        """
        def find_parameter_given_alternatives(parameters, options):
            # return the index of the first parameter whose name matches one
            # of the alternative spellings in `options`, or None if no match
            if any(i in options for i in parameters):
                parameter = [i for i in parameters if i in options]
                ind = parameters.index(parameter[0])
                return ind
            return None

        kwargs = {"sampler": {}, "meta_data": {}}
        possible_f_ref = ["f_ref", "fRef", "fref", "fref_template"]
        ind = find_parameter_given_alternatives(parameters, possible_f_ref)
        if ind is not None:
            kwargs["meta_data"]["f_ref"] = samples[0][ind]
        possible_f_low = ["flow", "f_low", "fLow", "flow_template"]
        ind = find_parameter_given_alternatives(parameters, possible_f_low)
        if ind is not None:
            kwargs["meta_data"]["f_low"] = samples[0][ind]
        return kwargs

    @staticmethod
    def _grab_data_from_dat_file(path, **kwargs):
        """Grab the data stored in a .dat file
        """
        data = CoreDefault._grab_data_from_dat_file(path)
        parameters, samples = data["parameters"], data["samples"]
        # check the definition of any inclination parameter in the file
        parameters = GWRead._check_definition_of_inclination(parameters)
        condition1 = "luminosity_distance" not in parameters
        condition2 = "logdistance" in parameters
        if condition1 and condition2:
            # derive luminosity_distance from the sampled log distance
            parameters.append("luminosity_distance")
            for num, i in enumerate(samples):
                samples[num].append(
                    np.exp(i[parameters.index("logdistance")]))
        try:
            extra_kwargs = Default.grab_extra_kwargs(parameters, samples)
        except Exception:
            extra_kwargs = {"sampler": {}, "meta_data": {}}
        extra_kwargs["sampler"]["nsamples"] = len(samples)
        return {
            "parameters": parameters, "samples": samples,
            "injection": Default._default_injection(parameters),
            "kwargs": extra_kwargs
        }

    @staticmethod
    def _grab_data_from_hdf5_file(path, path_to_samples=None, **kwargs):
        """Grab the data stored in an hdf5 file
        """
        # reuse the core hdf5 loader but strip the waveform_approximant entry
        # from the posterior table
        return CoreDefault._grab_data_from_hdf5_file(
            path, remove_params=["waveform_approximant"],
            path_to_samples=path_to_samples, **kwargs
        )

    @staticmethod
    def _grab_data_from_numpy_file(path, file_format=None, **kwargs):
        """Grab the data stored in a .npy file

        """
        if file_format is not None:
            try:
                import importlib
                # dynamically import the format-specific module and call its
                # read_<file_format> function
                module = importlib.import_module(
                    "pesummary.gw.file.formats.{}".format(file_format)
                )
                data = getattr(module, "read_{}".format(file_format))(path, **kwargs)
                return {
                    "parameters": data[0], "samples": data[1],
                    "injection": Default._default_injection(data[0])
                }
            except ModuleNotFoundError:
                from pesummary.utils.utils import logger
                logger.warning(
                    "Failed to find the module '{}'. Therefore ignoring "
                    "file_format={} and using default load".format(
                        "pesummary.gw.file.formats.{}".format(file_format),
                        file_format
                    )
                )
        # fall back to the core numpy loader
        return CoreDefault._grab_data_from_numpy_file(path, **kwargs)

    @staticmethod
    def _grab_data_from_prior_file(path, **kwargs):
        """Grab the data stored in a .prior file
        """
        return CoreDefault._grab_data_from_prior_file(
            path, module="gw", **kwargs
        )

    @staticmethod
    def _grab_data_from_xml_file(path, **kwargs):
        """Grab the data stored in an xml file
        """
        from pesummary.gw.file.formats.xml import read_xml

        parameters, samples = read_xml(path, **kwargs)
        extra_kwargs = {"sampler": {"nsamples": len(samples)}, "meta_data": {}}
        return {
            "parameters": parameters, "samples": samples,
            "injection": Default._default_injection(parameters),
            "kwargs": extra_kwargs
        }