1 """
2 Adaptive Rejection Sampling (ARS)
3
4 The ARS class generates a single random sample from a
5 univariate distribution specified by an instance of the
6 LogProb class, implemented by the user. An instance of
7 LogProb returns the log of the probability density and
8 its derivative. The log probability function passed must
9 be concave.
10
11 The user must also supply initial guesses. It is not
12 essential that these values be very accurate, but performance
13 will generally depend on their accuracy.
14 """
15
16 from numpy import exp, log
17
19 """
20 Envelope function for adaptive rejection sampling.
21
22 The envelope defines a piecewise linear upper and lower
23 bounding function of the concave log-probability.
24 """
34
36 """
37 Support intervals for upper bounding function.
38 """
39 from numpy import concatenate
40
41 h = self.h
42 dh = self.dh
43 x = self.x
44
45 z = (h[1:] - h[:-1] + x[:-1] * dh[:-1] - x[1:] * dh[1:]) / \
46 (dh[:-1] - dh[1:])
47
48 return concatenate(([self.z0], z, [self.zk]))
49
51 """
52 Piecewise linear upper bounding function.
53 """
54 z = self.z()[1:-1]
55 j = (x > z).sum()
56
57 return self.h[j] + self.dh[j] * (x - self.x[j])
58
60
# Maximum of the piecewise-linear upper bound, evaluated at the
# interior breakpoints.
# NOTE(review): shapes look inconsistent -- self.h / self.dh / self.x
# have one entry per support point (k) while z has k-1 interior
# breakpoints, so the elementwise expression below only broadcasts for
# k == 2.  Verify the intended indexing (e.g. something like
# h[:-1] + dh[:-1] * (z - x[:-1])).
z = self.z()[1:-1]

return (self.h + self.dh * (z - self.x)).max()
64
66 """
67 Piecewise linear lower bounding function.
68 """
69 from numpy import inf
70
71 j = (x > self.x).sum()
72
73 if j == 0 or j == len(self.x):
74 return -inf
75 else:
76 j -= 1
77 return ((self.x[j + 1] - x) * self.h[j] + (x - self.x[j]) * self.h[j + 1]) / \
78 (self.x[j + 1] - self.x[j])
79
81 """
82 Insert new support point for lower bounding function
83 (and indirectly for upper bounding function).
84 """
85 from numpy import concatenate
86
87 j = (x > self.x).sum()
88
89 self.x = concatenate((self.x[:j], [x], self.x[j:]))
90 self.h = concatenate((self.h[:j], [h], self.h[j:]))
91 self.dh = concatenate((self.dh[:j], [dh], self.dh[j:]))
92
94
# Log of the integral of exp(u) over each hull segment [z[j], z[j+1]],
# computed in a numerically stable form.
from numpy import abs, putmask

z = self.z()
# Intercept of each tangent line: on its segment, u(t) = dh*t + b.
b = self.h - self.x * self.dh
a = abs(self.dh)
# q selects the segment endpoint where the exponential is largest
# (right endpoint for positive slope, left for negative), so the
# remaining factor 1 - exp(-a * dz) stays within (0, 1].
m = (self.dh > 0)
q = self.x * 0.
putmask(q, m, z[1:])
# NOTE(review): `1 - m` is an integer-coerced boolean negation; `~m`
# would be the clearer modern spelling.
putmask(q, 1 - m, z[:-1])

# NOTE(review): a == 0 (a flat tangent) produces log(0) here -- confirm
# zero derivatives cannot occur at support points.
log_M = b - log(a) + log(1 - exp(-a * (z[1:] - z[:-1]))) + \
    self.dh * q

return log_M
109
111
# Unnormalized masses of exp(u) over the hull segments, computed
# directly (no log-space stabilization).
# NOTE(review): this form overflows for large h or z and divides by
# zero when dh contains zeros; the logarithmic variant in this class
# is the numerically stable one.
z = self.z()
b = self.h - self.x * self.dh
a = self.dh

return exp(b) * (exp(a * z[1:]) - exp(a * z[:-1])) / a
117
141
142
144
# Abstract hook: concrete log-probability objects return the pair
# (log p(x), d/dx log p(x)), as consumed via `h, dh = self.logp(x)`
# in the sampler further down this file.
raise NotImplementedError()
147
149
154
156
# Gaussian log-density (up to the additive normalization constant,
# which rejection sampling does not need) and its derivative w.r.t. x.
return -0.5 * (x - self.mu) ** 2 / self.sigma ** 2, \
    - (x - self.mu) / self.sigma ** 2
159
160
162
163 from numpy import inf
164
168
170
from numpy import array

# Build the initial envelope from the starting abscissae x: self.logp
# returns the (h, dh) arrays that become the hull's tangent data.
self.hull = Envelope(array(x), *self.logp(array(x)))
# Outer support bounds, read back by the hull's breakpoint routine.
# NOTE(review): naming asymmetry (z0 vs. zk <- zmax) -- presumably the
# lower/upper domain limits; confirm against the constructor defaults.
self.hull.z0 = z0
self.hull.zk = zmax
176
def sample(self, maxiter=100):
    """
    Generate a single sample from the target distribution by
    adaptive rejection sampling.

    Parameters
    ----------
    maxiter : int
        Maximum number of proposal/refinement rounds before giving up.

    Returns
    -------
    The accepted sample, or None if no proposal was accepted within
    maxiter iterations.
    """
    # NOTE: the original line carried a stray leading "- " diff marker,
    # which made this definition a syntax error; removed.
    from numpy.random import random

    for _ in range(maxiter):

        x = self.hull.sample()
        l = self.hull.l(x)
        u = self.hull.u(x)
        w = random()

        # Squeeze test: accept using only the cheap piecewise-linear
        # bounds, without evaluating the true log-probability.
        if w <= exp(l - u):
            return x

        h, dh = self.logp(x)

        # Full rejection test against the true (unnormalized) density.
        if w <= exp(h - u):
            return x

        # Rejected: refine the envelope at x so future proposals
        # are tighter.
        self.hull.insert(x, h, dh)

    # Iteration budget exhausted; make the original implicit
    # fallthrough explicit.
    return None
195