4 from __future__
import (division, print_function, absolute_import,
7 __all__ = [
"PTSampler"]
15 from .sampler
import Sampler
16 from .ensemble
import EnsembleSampler
17 import multiprocessing
as multi
19 import numpy.random
as nr
24 Wrapper for posterior used with the :class:`PTSampler` in emcee.
31 Function returning natural log of the likelihood.
34 Function returning natural log of the prior.
37 Inverse temperature of this chain: ``lnpost = beta*logl + logp``.
47 Returns ``lnpost(x)``, ``lnlike(x)`` (the second value will be
48 treated as a blob by emcee), where
52 \ln \pi(x) \equiv \beta \ln l(x) + \ln p(x)
55 The position in parameter space.
61 if lp == float(
'-inf'):
66 return self.
_beta * ll + lp, ll
71 A parallel-tempered ensemble sampler, using :class:`EnsembleSampler`
72 for sampling within each parallel chain.
75 The number of temperatures.
78 The number of ensemble walkers at each temperature.
81 The dimension of parameter space.
84 The log-likelihood function.
87 The log-prior function.
89 :param threads: (optional)
90 The number of parallel threads to use in sampling.
92 :param pool: (optional)
93 Alternative to ``threads``. Any object that implements a
94 ``map`` method compatible with the built-in ``map`` will do
95 here. For example, :class:`multi.Pool` will do.
97 :param betas: (optional)
98 Array giving the inverse temperatures, :math:`\\beta=1/T`, used in the
99 ladder. The default is for an exponential ladder, with beta
100 decreasing by a factor of :math:`1/\\sqrt{2}` each rung.
103 def __init__(self, ntemps, nwalkers, dim, logl, logp, threads=1,
104 pool=
None, betas=
None):
121 self.
nswap = np.zeros(ntemps, dtype=np.float)
125 if threads > 1
and pool
is None:
126 self.
pool = multi.Pool(threads)
128 self.
samplers = [EnsembleSampler(nwalkers, dim,
135 Exponential ladder in :math:`1/T`, with :math:`T` increasing by
136 :math:`\\sqrt{2}` each step, with ``ntemps`` in total.
139 return np.exp(np.linspace(0, -(ntemps - 1) * 0.5 * np.log(2), ntemps))
143 Clear the ``chain``, ``lnprobability``, ``lnlikelihood``,
144 ``acceptance_fraction``, ``tswap_acceptance_fraction`` stored
158 def sample(self, p0, lnprob0=None, lnlike0=None, iterations=1,
159 thin=1, storechain=
True):
161 Advance the chains ``iterations`` steps as a generator.
164 The initial positions of the walkers. Shape should be
165 ``(ntemps, nwalkers, dim)``.
167 :param lnprob0: (optional)
168 The initial posterior values for the ensembles. Shape
169 ``(ntemps, nwalkers)``.
171 :param lnlike0: (optional)
172 The initial likelihood values for the ensembles. Shape
173 ``(ntemps, nwalkers)``.
175 :param iterations: (optional)
176 The number of iterations to perform.
178 :param thin: (optional)
179 The number of iterations to perform between saving the
180 state to the internal chain.
182 :param storechain: (optional)
183 If ``True`` store the iterations in the ``chain``
186 At each iteration, this generator yields
188 * ``p``, the current position of the walkers.
190 * ``lnprob`` the current posterior values for the walkers.
192 * ``lnlike`` the current likelihood values for the walkers.
195 p = np.copy(np.array(p0))
198 if lnprob0
is None or lnlike0
is None:
201 for i
in range(self.
ntemps):
203 if self.
pool is None:
204 results = list(map(fn, p[i, :, :]))
206 results = list(self.pool.map(fn, p[i, :, :]))
208 lnprob0[i, :] = np.array([r[0]
for r
in results])
209 lnlike0[i, :] = np.array([r[1]
for r
in results])
216 nsave = iterations / thin
225 isave = self._chain.shape[2]
242 for i
in range(iterations):
243 for j, s
in enumerate(self.
samplers):
244 for psamp, lnprobsamp, rstatesamp, loglsamp
in s.sample(
246 lnprob0=lnprob[j, ...],
247 blobs0=logl[j, ...], storechain=
False):
249 lnprob[j, ...] = lnprobsamp
250 logl[j, ...] = np.array(loglsamp)
254 if (i + 1) % thin == 0:
256 self.
_chain[:, :, isave, :] = p
257 self.
_lnprob[:, :, isave, ] = lnprob
261 yield p, lnprob, logl
263 def _temperature_swaps(self, p, lnprob, logl):
265 Perform parallel-tempering temperature swaps on the state
266 in ``p`` with associated ``lnprob`` and ``logl``.
271 for i
in range(ntemps - 1, 0, -1):
273 bi1 = self.
betas[i - 1]
277 iperm = nr.permutation(self.
nwalkers)
278 i1perm = nr.permutation(self.
nwalkers)
280 raccept = np.log(nr.uniform(size=self.
nwalkers))
281 paccept = dbeta * (logl[i, iperm] - logl[i - 1, i1perm])
286 asel = (paccept > raccept)
292 ptemp = np.copy(p[i, iperm[asel], :])
293 ltemp = np.copy(logl[i, iperm[asel]])
294 prtemp = np.copy(lnprob[i, iperm[asel]])
296 p[i, iperm[asel], :] = p[i - 1, i1perm[asel], :]
297 logl[i, iperm[asel]] = logl[i - 1, i1perm[asel]]
298 lnprob[i, iperm[asel]] = lnprob[i - 1, i1perm[asel]] \
299 - dbeta * logl[i - 1, i1perm[asel]]
301 p[i - 1, i1perm[asel], :] = ptemp
302 logl[i - 1, i1perm[asel]] = ltemp
303 lnprob[i - 1, i1perm[asel]] = prtemp + dbeta * ltemp
305 return p, lnprob, logl
309 Thermodynamic integration estimate of the evidence.
311 :param logls: (optional) The log-likelihoods to use for
312 computing the thermodynamic evidence. If ``None`` (the
313 default), use the stored log-likelihoods in the sampler.
314 Should be of shape ``(Ntemps, Nwalkers, Nsamples)``.
316 :param fburnin: (optional)
317 The fraction of the chain to discard as burnin samples; only the
318 final ``1-fburnin`` fraction of the samples will be used to
319 compute the evidence; the default is ``fburnin = 0.1``.
321 :return ``(lnZ, dlnZ)``: Returns an estimate of the
322 log-evidence and the error associated with the finite
323 number of temperatures at which the posterior has been
326 The evidence is the integral of the un-normalized posterior
327 over all of parameter space:
331 Z \\equiv \\int d\\theta \\, l(\\theta) p(\\theta)
333 Thermodymanic integration is a technique for estimating the
334 evidence integral using information from the chains at various
339 Z(\\beta) = \\int d\\theta \\, l^\\beta(\\theta) p(\\theta)
345 \\frac{d \\ln Z}{d \\beta}
346 = \\frac{1}{Z(\\beta)} \\int d\\theta l^\\beta p \\ln l
347 = \\left \\langle \\ln l \\right \\rangle_\\beta
354 = \\int_0^1 d\\beta \\left \\langle \\ln l \\right\\rangle_\\beta
356 By computing the average of the log-likelihood at the
357 difference temperatures, the sampler can approximate the above
365 betas = np.concatenate((self.
betas, np.array([0])))
366 betas2 = np.concatenate((self.
betas[::2], np.array([0])))
368 istart = int(logls.shape[2] * fburnin + 0.5)
370 mean_logls = np.mean(np.mean(logls, axis=1)[:, istart:], axis=1)
371 mean_logls2 = mean_logls[::2]
373 lnZ = -np.dot(mean_logls, np.diff(betas))
374 lnZ2 = -np.dot(mean_logls2, np.diff(betas2))
376 return lnZ, np.abs(lnZ - lnZ2)
381 Returns the sequence of inverse temperatures in the ladder.
389 Returns the stored chain of samples; shape ``(Ntemps,
390 Nwalkers, Nsteps, Ndim)``.
398 Matrix of lnprobability values; shape ``(Ntemps, Nwalkers, Nsteps)``.
406 Matrix of ln-likelihood values; shape ``(Ntemps, Nwalkers, Nsteps)``.
414 Returns an array of accepted temperature swap fractions for
415 each temperature; shape ``(ntemps, )``.
423 Matrix of shape ``(Ntemps, Nwalkers)`` detailing the
424 acceptance fraction for each walker.
427 return np.array([s.acceptance_fraction
for s
in self.
samplers])
432 Returns a matrix of autocorrelation lengths for each
433 parameter in each temperature of shape ``(Ntemps, Ndim)``.
437 raise ImportError(
'acor')
439 acors = np.zeros((self.
ntemps, self.
dim))
441 for i
in range(self.
ntemps):
442 for j
in range(self.
dim):
443 acors[i, j] = acor.acor(self.
_chain[i, :, :, j])[0]