58 from pylal
import datatypes
as laltypes
59 from pylal
import lalfft
60 from pylal
import spawaveform
62 import lalsimulation
as lalsim
65 from gstlal.reference_psd
import interpolate_psd, horizon_distance
68 from gstlal
import templates
69 from gstlal
import chirptime
72 __author__ =
"Kipp Cannon <kipp.cannon@ligo.org>, Chad Hanna <chad.hanna@ligo.org>, Drew Keppel <drew.keppel@ligo.org>"
def tukeywindow(data, samps = 200.):
	"""
	Return a Tukey window array the same length as data, with the
	taper fraction chosen so that approximately samps samples are
	tapered at each end.
	"""
	n = len(data)
	# the taper regions must fit inside the data
	assert (n >= 2 * samps)
	taper_fraction = float(samps) / n
	return lal.CreateTukeyREAL8Window(n, taper_fraction).data.data
def generate_template(template_bank_row, approximant, sample_rate, duration, f_low, f_high, amporder = 0, order = 7, fwdplan = None, fworkspace = None):
	"""
	Generate a single frequency-domain template, which
	(1) is band-limited between f_low and f_high,
	(2) has an IFFT which is duration seconds long and
	(3) has an IFFT which is sampled at sample_rate Hz

	@param template_bank_row sngl_inspiral-style row supplying component
	masses (solar masses) and spin components
	@param approximant waveform approximant name (string)

	NOTE(review): several lines of this function are elided from this
	view; the elisions are marked inline below.
	"""
	if approximant in templates.gstlal_approximants:
		# build the waveform directly in the frequency domain with
		# lalsimulation; masses are converted to SI units
		# NOTE(review): the SimInspiralFD arguments between the spin
		# components and the approximant are elided from this view
		hplus,hcross = lalsim.SimInspiralFD(
			lal.MSUN_SI * template_bank_row.mass1,
			lal.MSUN_SI * template_bank_row.mass2,
			template_bank_row.spin1x,
			template_bank_row.spin1y,
			template_bank_row.spin1z,
			template_bank_row.spin2x,
			template_bank_row.spin2y,
			template_bank_row.spin2z,
			lalsim.GetApproximantFromString(str(approximant))
		# NOTE(review): the closing parenthesis and the definition of z
		# (presumably the frequency-series data of hplus) are elided
		# sanity check: one-sided frequency series length matches the
		# requested duration and sample rate
		assert len(z) == int(round(sample_rate * duration))//2 +1
	# NOTE(review): the else: header for unsupported approximants is
	# elided; the raise below presumably lives inside it
		raise ValueError("Unsupported approximant given %s" % approximant)
	# repackage the result as a pylal COMPLEX16FrequencySeries, carrying
	# over the epoch from hplus
	return laltypes.COMPLEX16FrequencySeries(
		epoch = laltypes.LIGOTimeGPS(hplus.epoch.gpsSeconds, hplus.epoch.gpsNanoSeconds),
		deltaF = 1.0 / duration,
		sampleUnits = laltypes.LALUnit("strain"),
	# NOTE(review): the data keyword argument and closing parenthesis
	# are elided from this view
def condition_imr_template(approximant, data, epoch_time, sample_rate_max, max_ringtime):
	"""
	Align an IMR waveform so that the sample implied by its (negative)
	epoch lands at a fixed index that leaves max_ringtime seconds of
	ringdown at the end of the series, zeroing the phase at that sample
	and tapering with a Tukey window sized to the amount rolled.

	@param data complex time-domain samples of the waveform
	@param epoch_time epoch of the series in seconds; must lie in
	[-len(data)/sample_rate_max, 0)
	@param sample_rate_max sample rate of data in Hz
	@param max_ringtime ringdown duration (s) to leave after the peak

	Returns the conditioned data and the index of the peak sample.
	"""
	assert -len(data) / sample_rate_max <= epoch_time < 0.0, "Epoch returned follows a different convention"
	# sample index of the coalescence implied by the negative epoch
	coa_index = -int(epoch_time * sample_rate_max) - 1
	# index where the coalescence should end up
	dest_index = len(data) - 1 - int(sample_rate_max * max_ringtime)
	shift = dest_index - coa_index
	# zero the phase at the coalescence sample ...
	data *= numpy.exp(-1.j * numpy.arctan2(data[coa_index].imag, data[coa_index].real))
	# ... then roll the coalescence into place
	data = numpy.roll(data, shift)
	# taper in proportion to how far the waveform was rolled
	tukey_beta = 2. * abs(shift) / float(len(data))
	assert 0. <= tukey_beta <= 1., "waveform got rolled WAY too much"
	data *= lal.CreateTukeyREAL8Window(len(data), tukey_beta).data.data
	return data, dest_index
def compute_autocorrelation_mask( autocorrelation ):
	"""
	Given an autocorrelation time series, estimate the optimal
	autocorrelation length to use and return a matrix which masks
	out the unwanted elements.  FIXME TODO for now this is a no-op:
	the returned mask is all ones, i.e. every element is kept.
	"""
	# same shape as the input, integer ones everywhere
	return numpy.ones_like(autocorrelation, dtype="int")
def movingmedian(interval, window_size):
	"""
	Return a copy of interval in which each interior sample has been
	replaced by the running median of its neighborhood.  The first and
	last window_size samples are left untouched.

	@param interval array-like sequence of samples
	@param window_size half-width (in samples) of the median window

	NOTE: the slice interval[i-window_size:i+window_size] spans
	2*window_size samples and excludes the sample at i+window_size,
	so the window is asymmetric; preserved as-is for compatibility.
	"""
	tmp = numpy.copy(interval)
	for i in range(window_size, len(interval) - window_size):
		tmp[i] = numpy.median(interval[i - window_size:i + window_size])
	# return the smoothed copy -- without this the loop's result is
	# silently discarded and the caller receives None
	return tmp
def movingaverage(interval, window_size):
	"""
	Smooth interval by convolving it with a Tukey window (taper
	fraction 0.5) of window_size samples; the output has the same
	length as the input ('same' mode convolution).
	"""
	kernel = lal.CreateTukeyREAL8Window(window_size, 0.5).data.data
	return numpy.convolve(interval, kernel, 'same')
def condition_psd(psd, newdeltaF, minfs = (35.0, 40.0), maxfs = (1800., 2048.), smoothing_frequency = 4.):
	"""
	A function to condition the psd that will be used to whiten waveforms

	@param psd A real8 frequency series containing the psd
	@param newdeltaF (Hz) The target delta F to interpolate to
	@param minfs (Hz) The frequency boundaries over which to taper the spectrum to infinity.  i.e., frequencies below the first item in the tuple will have an infinite spectrum, the second item in the tuple will not be changed.  A taper from 0 to infinity is applied in between.  The PSD is also tapered from 0.85 * Nyquist to Nyquist.
	@param smoothing_frequency (Hz) The target frequency resolution after smoothing.  Lines with bandwidths << smoothing_frequency are removed via a median calculation.  Remaining features will be blurred out to this resolution.

	returns a conditioned psd

	NOTE(review): some lines are elided from this view; elisions are
	marked inline below.
	"""
	# record the horizon distance before conditioning so the overall
	# normalization can be restored at the end
	horizon_before = horizon_distance(psd, 1.4, 1.4, 8.0, minfs[1], maxfs[0])
	# interpolate to the requested frequency resolution
	psd = interpolate_psd(psd, newdeltaF)
	# NOTE(review): the line extracting psddata from psd is elided here
	# smooth the spectrum: the running median removes narrow lines, the
	# moving average blurs what remains to ~smoothing_frequency resolution
	avgwindow = int(smoothing_frequency / newdeltaF)
	psddata = movingmedian(psddata, avgwindow)
	psddata = movingaverage(psddata, avgwindow)
	# low-frequency taper: infinite PSD below minfs[0], smooth sin^4
	# roll-on between minfs[0] and minfs[1]
	kmin = int(minfs[0] / newdeltaF)
	kmax = int(minfs[1] / newdeltaF)
	psddata[:kmin] = float('Inf')
	psddata[kmin:kmax] /= numpy.sin(numpy.arange(kmax-kmin) / (kmax-kmin-1.) * numpy.pi / 2.0)**4
	# high-frequency taper: cos^4 roll-off between maxfs[0] and
	# maxfs[1], infinite PSD above maxfs[1]
	kmin = int(maxfs[0] / newdeltaF)
	kmax = int(maxfs[1] / newdeltaF)
	psddata[kmax:] = float('Inf')
	psddata[kmin:kmax] /= numpy.cos(numpy.arange(kmax-kmin) / (kmax-kmin-1.) * numpy.pi / 2.0)**4
	# rescale so the horizon distance is unchanged by the conditioning
	horizon_after = horizon_distance(psd, 1.4, 1.4, 8.0, minfs[1], maxfs[0])
	psd.data = psddata * (horizon_after / horizon_before)**2
	# NOTE(review): the return statement is elided from this view;
	# presumably returns psd
def joliens_function(f, template_table):
	"""
	A function to compute the padding needed to guarantee well behaved waveforms

	@param f The target low frequency starting point
	@param template_table The sngl_inspiral table containing all the template parameters for this bank

	Returns the extra padding in time (tx) and the new low frequency cut
	off that should be used (f_new) as a tuple: (tx, f_new)

	NOTE(review): some lines are elided from this view; elisions are
	marked inline below.
	"""
	def _chirp_duration(f, m1, m2):
		"""
		@returns the Newtonian chirp duration in seconds
		@param f the starting frequency in Hertz
		@param m1 mass of one component in solar masses
		@param m2 mass of the other component in solar masses
		"""
		# NOTE(review): the definitions of M (presumably total mass in
		# seconds) and nu (presumably symmetric mass ratio) are elided
		v = (numpy.pi * M * f)**(1.0/3.0)
		return 5.0 * M / (256.0 * nu * v**8)

	def _chirp_start_frequency(t, m1, m2):
		"""
		@returns the Newtonian chirp start frequency in Hertz
		@param t the time before coalescence in seconds
		@param m1 mass of one component in solar masses
		@param m2 mass of the other component in solar masses
		"""
		# NOTE(review): M and nu definitions are elided here as well
		theta = (nu * t / (5.0 * M))**(-1.0/8.0)
		return theta**3 / (8.0 * numpy.pi * M)

	# the template with the smallest chirp mass has the longest duration
	minmc = min(template_table.getColumnByName('mchirp'))
	row = [t for t in template_table if t.mchirp == minmc][0]
	m1, m2 = row.mass1, row.mass2
	tc = _chirp_duration(f, m1, m2)
	# NOTE(review): the definition of tx (extra padding time) is elided
	f_new = _chirp_start_frequency(tx + tc, m1, m2)
	# NOTE(review): the return statement is elided from this view;
	# presumably returns (tx, f_new) per the docstring
def generate_templates(template_table, approximant, psd, f_low, time_slices, autocorrelation_length = None, verbose = False):
	"""
	Generate a bank of templates, which are
	(1) broken up into time slice,
	(2) down-sampled in each time slice and
	(3) whitened with the given psd.

	NOTE(review): many lines of this function are elided from this view;
	the elisions that affect reading are marked inline below.
	"""
	sample_rate_max = max(time_slices['rate'])
	duration = max(time_slices['end'])
	length_max = int(round(duration * sample_rate_max))

	# extra low-frequency padding/cutoff from the longest template
	working_f_low_extra_time, working_f_low = joliens_function(f_low, template_table)

	# NOTE(review): the computation of working_length is elided here
	working_duration = float(working_length) / sample_rate_max

	# smooth and taper the PSD at the working frequency resolution
	psd = condition_psd(psd, 1.0 / working_duration, minfs = (working_f_low, f_low), maxfs = (sample_rate_max / 2.0 * 0.90, sample_rate_max / 2.0))

	# FFT plans and scratch series reused for every template
	revplan = lalfft.XLALCreateReverseCOMPLEX16FFTPlan(working_length, 1)
	fwdplan = lalfft.XLALCreateForwardREAL8FFTPlan(working_length, 1)
	tseries = laltypes.COMPLEX16TimeSeries(
		data = numpy.zeros((working_length,), dtype = "cdouble")
	# NOTE(review): closing parenthesis elided from this view
	fworkspace = laltypes.COMPLEX16FrequencySeries(
		epoch = laltypes.LIGOTimeGPS(0),
		deltaF = 1.0 / working_duration,
		data = numpy.zeros((working_length//2 + 1,), dtype = "cdouble")
	# NOTE(review): closing parenthesis elided from this view

	# optionally allocate the autocorrelation bank (one row per
	# template) and its mask
	if autocorrelation_length is not None:
		if not (autocorrelation_length % 2):
			# Python 2 raise syntax
			raise ValueError, "autocorrelation_length must be odd (got %d)" % autocorrelation_length
		autocorrelation_bank = numpy.zeros((len(template_table), autocorrelation_length), dtype = "cdouble")
		autocorrelation_mask = compute_autocorrelation_mask( autocorrelation_bank )
	# NOTE(review): the else: header for the following branch is elided
		autocorrelation_bank = None
		autocorrelation_mask = None

	# one real matrix per time slice; two rows per template (real and
	# imaginary phases)
	template_bank = [numpy.zeros((2 * len(template_table), int(round(rate*(end-begin)))), dtype = "double") for rate,begin,end in time_slices]

	for i, row in enumerate(template_table):
		# NOTE(review): the verbose guard is elided; Python 2 print syntax
		print >>sys.stderr, "generating template %d/%d: m1 = %g, m2 = %g, s1x = %g, s1y = %g, s1z = %g, s2x = %g, s2y = %g, s2z = %g" % (i + 1, len(template_table), row.mass1, row.mass2, row.spin1x, row.spin1y, row.spin1z, row.spin2x, row.spin2y, row.spin2z)

		# generate and whiten the frequency-domain template
		fseries = generate_template(row, approximant, sample_rate_max, working_duration, f_low, sample_rate_max / 2., fwdplan = fwdplan, fworkspace = fworkspace)
		lalfft.XLALWhitenCOMPLEX16FrequencySeries(fseries, psd)

		# store this template's autocorrelation row, reordered so that
		# zero lag sits at the center of the row
		if autocorrelation_bank is not None:
			# NOTE(review): the computation of `autocorrelation` is elided
			autocorrelation_bank[i, ::-1] = numpy.concatenate((autocorrelation[-(autocorrelation_length // 2):], autocorrelation[:(autocorrelation_length // 2 + 1)]))

		# inverse FFT back to the time domain
		lalfft.XLALCOMPLEX16FreqTimeFFT(tseries, fseries, revplan)

		epoch_time = fseries.epoch.seconds + fseries.epoch.nanoseconds*1.e-9

		# IMR waveforms get rolled and tapered so the merger lands at a
		# fixed index near the end of the series
		if approximant in templates.gstlal_IMR_approximants:
			# NOTE(review): the definitions of data and max_ringtime
			# are elided from this view
			data, target_index = condition_imr_template(approximant, data, epoch_time, sample_rate_max, max_ringtime)
			row.set_end(laltypes.LIGOTimeGPS(float(target_index-(len(data) - 1.))/sample_rate_max))
		# NOTE(review): an else: branch is elided; the taper below
		# presumably applies to non-IMR templates
		data *= tukeywindow(data, samps = 32)

		# keep only the final length_max samples
		data = data[-length_max:]

		# normalize so that the sum of |data|^2 equals 2
		norm = abs(numpy.dot(data, numpy.conj(data)))
		data *= cmath.sqrt(2 / norm)

		# NOTE(review): the initialization of sigmasq is elided
		sigmasq.append(norm * len(data) / sample_rate_max**2.)

		# decimate the full-rate data into each time slice by striding
		for j, time_slice in enumerate(time_slices):
			stride = int(round(sample_rate_max / time_slice['rate']))
			begin_index = length_max - int(round(time_slice['begin'] * sample_rate_max)) + stride - 1
			end_index = length_max - int(round(time_slice['end'] * sample_rate_max)) + stride - 1
			# the slice rate must evenly divide the maximum sample rate
			assert stride * time_slice['rate'] == sample_rate_max
			# sqrt(stride) compensates the decimation -- presumably to
			# preserve the template norm; confirm against full source
			template_bank[j][(2*i+0),:] = data.real[end_index:begin_index:stride] * math.sqrt(stride)
			template_bank[j][(2*i+1),:] = data.imag[end_index:begin_index:stride] * math.sqrt(stride)

	return template_bank, autocorrelation_bank, autocorrelation_mask, sigmasq, psd
def decompose_templates(template_bank, tolerance, identity = False):
	"""
	Decompose a template bank with an SVD, truncating the basis at the
	requested reconstruction tolerance.

	NOTE(review): the original docstring and several lines are elided
	from this view; elisions are marked inline below.
	"""
	# sum of squares of each template row (used later to restore each
	# template's norm in the reconstruction coefficients)
	chifacs = (template_bank * template_bank).sum(1)

	# NOTE(review): the guard for this early return is elided; it
	# presumably fires when identity=True (no decomposition requested)
	return template_bank, None, None, chifacs

	# rescale the tolerance relative to the largest template norm
	tolerance = 1 - (1 - tolerance) / chifacs.max()

	# singular value decomposition of the transposed bank (in place)
	U, s, Vh = spawaveform.svd(template_bank.T,mod=True,inplace=True)

	# cumulative fraction of the total power captured by the first k
	# singular values
	residual = numpy.sqrt((s * s).cumsum() / numpy.dot(s, s))

	# number of basis vectors to keep: enough to reach the tolerance,
	# capped at len(s), but never fewer than 6
	n = max(min(residual.searchsorted(tolerance) + 1, len(s)), 6)

	# fold the singular values into the reconstruction matrix and
	# truncate to the first n rows
	Vh = numpy.dot(numpy.diag(s), Vh)[:n,:]

	# rescale each column of Vh so its power matches the corresponding
	# original template's chifacs value
	V2 = (Vh * Vh).sum(0)
	for idx,v2 in enumerate(V2):
		Vh[:, idx] *= numpy.sqrt(chifacs[idx] / v2)

	return U.T, s, Vh, chifacs