Reimplement several RandomVariables as SymbolicRandomVariables
This allows sampling from multiple backends without having to dispatch for each one
ricardoV94 committed Apr 9, 2024
1 parent 47577ce commit 43f7a91
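
As an illustration only (a minimal sketch using the public PyMC API; the Kumaraswamy distribution and the parameter values below are arbitrary choices, not part of this commit), a distribution reimplemented as a SymbolicRandomVariable is sampled by compiling the pytensor graph that defines its draws, so the C, Numba, or JAX backends can all execute it without a NumPy-specific rng_fn:

import pymc as pm

# Kumaraswamy draws are now defined as a pytensor graph built from a Uniform RV,
# rather than by a NumPy-only rng_fn.
x = pm.Kumaraswamy.dist(a=2.0, b=3.0)

# Forward sampling compiles and evaluates that graph.
print(pm.draw(x, draws=5, random_seed=1))

# Assumption: pm.draw forwards compilation kwargs to the compiled function, so a
# different backend could be requested, e.g. pm.draw(x, draws=5, mode="JAX").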
Showing 3 changed files with 161 additions and 110 deletions.
172 changes: 110 additions & 62 deletions pymc/distributions/continuous.py
@@ -73,7 +73,6 @@ def polyagamma_cdf(*args, **kwargs):

from scipy import stats
from scipy.interpolate import InterpolatedUnivariateSpline
from scipy.special import expit

from pymc.distributions import transforms
from pymc.distributions.dist_math import (
@@ -90,7 +89,7 @@ def polyagamma_cdf(*args, **kwargs):
normal_lcdf,
zvalue,
)
from pymc.distributions.distribution import DIST_PARAMETER_TYPES, Continuous
from pymc.distributions.distribution import DIST_PARAMETER_TYPES, Continuous, SymbolicRandomVariable
from pymc.distributions.shape_utils import rv_size_is_none
from pymc.distributions.transforms import _default_transform
from pymc.math import invlogit, logdiffexp, logit
@@ -1236,20 +1235,33 @@ def icdf(value, alpha, beta):
)


class KumaraswamyRV(RandomVariable):
class KumaraswamyRV(SymbolicRandomVariable):
name = "kumaraswamy"
ndim_supp = 0
ndims_params = [0, 0]
dtype = "floatX"
signature = "[rng],[size],(),()->[rng],()"
_print_name = ("Kumaraswamy", "\\operatorname{Kumaraswamy}")

@classmethod
def rng_fn(cls, rng, a, b, size) -> np.ndarray:
u = rng.uniform(size=size)
return np.asarray((1 - (1 - u) ** (1 / b)) ** (1 / a))
def rv_op(cls, a, b, *, size=None, rng=None):
a = pt.as_tensor(a)
b = pt.as_tensor(b)

if rng is None:
rng = pytensor.shared(np.random.default_rng())

if size is not None:
size = pt.as_tensor(size, dtype="int64", ndim=1)

kumaraswamy = KumaraswamyRV()
if rv_size_is_none(size):
size = pt.as_tensor(pt.broadcast_shape(a, b))

next_rng, u = uniform(size=size, rng=rng).owner.outputs
draws = (1 - (1 - u) ** (1 / b)) ** (1 / a)

return cls(
inputs=[rng, size, a, b],
outputs=[next_rng, draws],
)(rng, size, a, b)


class Kumaraswamy(UnitContinuous):
@@ -1296,13 +1308,11 @@ class Kumaraswamy(UnitContinuous):
b > 0.
"""

rv_op = kumaraswamy
rv_type = KumaraswamyRV
rv_op = KumaraswamyRV.rv_op

@classmethod
def dist(cls, a: DIST_PARAMETER_TYPES, b: DIST_PARAMETER_TYPES, *args, **kwargs):
a = pt.as_tensor_variable(a)
b = pt.as_tensor_variable(b)

return super().dist([a, b], *args, **kwargs)

def support_point(rv, size, a, b):
@@ -1533,24 +1543,37 @@ def icdf(value, mu, b):
return check_icdf_parameters(res, b > 0, msg="b > 0")


class AsymmetricLaplaceRV(RandomVariable):
class AsymmetricLaplaceRV(SymbolicRandomVariable):
name = "asymmetriclaplace"
ndim_supp = 0
ndims_params = [0, 0, 0]
dtype = "floatX"
signature = "[rng],[size],(),(),()->[rng],()"
_print_name = ("AsymmetricLaplace", "\\operatorname{AsymmetricLaplace}")

@classmethod
def rng_fn(cls, rng, b, kappa, mu, size=None) -> np.ndarray:
u = rng.uniform(size=size)
def rv_op(cls, b, kappa, mu, *, size=None, rng=None):
b = pt.as_tensor(b)
kappa = pt.as_tensor(kappa)
mu = pt.as_tensor(mu)

if rng is None:
rng = pytensor.shared(np.random.default_rng())

if size is not None:
size = pt.as_tensor(size, dtype="int64", ndim=1)

if rv_size_is_none(size):
size = pt.as_tensor(pt.broadcast_shape(b, kappa, mu))

next_rng, u = uniform(size=size, rng=rng).owner.outputs
switch = kappa**2 / (1 + kappa**2)
non_positive_x = mu + kappa * np.log(u * (1 / switch)) / b
positive_x = mu - np.log((1 - u) * (1 + kappa**2)) / (kappa * b)
non_positive_x = mu + kappa * pt.log(u * (1 / switch)) / b
positive_x = mu - pt.log((1 - u) * (1 + kappa**2)) / (kappa * b)
draws = non_positive_x * (u <= switch) + positive_x * (u > switch)
return np.asarray(draws)


asymmetriclaplace = AsymmetricLaplaceRV()
return cls(
inputs=[rng, size, b, kappa, mu],
outputs=[next_rng, draws],
)(rng, size, b, kappa, mu)


class AsymmetricLaplace(Continuous):
@@ -1599,15 +1622,12 @@ class AsymmetricLaplace(Continuous):
of interest.
"""

rv_op = asymmetriclaplace
rv_type = AsymmetricLaplaceRV
rv_op = AsymmetricLaplaceRV.rv_op

@classmethod
def dist(cls, kappa=None, mu=None, b=None, q=None, *args, **kwargs):
kappa = cls.get_kappa(kappa, q)
b = pt.as_tensor_variable(b)
kappa = pt.as_tensor_variable(kappa)
mu = pt.as_tensor_variable(mu)

return super().dist([b, kappa, mu], *args, **kwargs)

@classmethod
@@ -2475,7 +2495,6 @@ def dist(cls, nu, **kwargs):
return Gamma.dist(alpha=nu / 2, beta=1 / 2, **kwargs)


# TODO: Remove this once logp for multiplication is working!
class WeibullBetaRV(RandomVariable):
name = "weibull"
ndim_supp = 0
@@ -2597,19 +2616,30 @@ def icdf(value, alpha, beta):
)


class HalfStudentTRV(RandomVariable):
class HalfStudentTRV(SymbolicRandomVariable):
name = "halfstudentt"
ndim_supp = 0
ndims_params = [0, 0]
dtype = "floatX"
signature = "[rng],[size],(),()->[rng],()"
_print_name = ("HalfStudentT", "\\operatorname{HalfStudentT}")

@classmethod
def rng_fn(cls, rng, nu, sigma, size=None) -> np.ndarray:
return np.asarray(np.abs(stats.t.rvs(nu, scale=sigma, size=size, random_state=rng)))
def rv_op(cls, nu, sigma, *, size=None, rng=None):
nu = pt.as_tensor(nu)
sigma = pt.as_tensor(sigma)

if rng is None:
rng = pytensor.shared(np.random.default_rng())

if size is not None:
size = pt.as_tensor(size, dtype="int64", ndim=1)

if rv_size_is_none(size):
size = pt.as_tensor([], dtype="int64", ndim=1)

next_rng, t_draws = t(df=nu, scale=sigma, size=size, rng=rng).owner.outputs
draws = pt.abs(t_draws)

halfstudentt = HalfStudentTRV()
return cls(inputs=[rng, size, nu, sigma], outputs=[next_rng, draws])(rng, size, nu, sigma)


class HalfStudentT(PositiveContinuous):
@@ -2671,14 +2701,12 @@ class HalfStudentT(PositiveContinuous):
x = pm.HalfStudentT('x', lam=4, nu=10)
"""

rv_op = halfstudentt
rv_type = HalfStudentTRV
rv_op = HalfStudentTRV.rv_op

@classmethod
def dist(cls, nu, sigma=None, lam=None, *args, **kwargs):
nu = pt.as_tensor_variable(nu)
lam, sigma = get_tau_sigma(lam, sigma)
sigma = pt.as_tensor_variable(sigma)

return super().dist([nu, sigma], *args, **kwargs)

def support_point(rv, size, nu, sigma):
@@ -2710,19 +2738,34 @@ def logp(value, nu, sigma):
)


class ExGaussianRV(RandomVariable):
class ExGaussianRV(SymbolicRandomVariable):
name = "exgaussian"
ndim_supp = 0
ndims_params = [0, 0, 0]
dtype = "floatX"
signature = "[rng],[size],(),(),()->[rng],()"
_print_name = ("ExGaussian", "\\operatorname{ExGaussian}")

@classmethod
def rng_fn(cls, rng, mu, sigma, nu, size=None) -> np.ndarray:
return np.asarray(rng.normal(mu, sigma, size=size) + rng.exponential(scale=nu, size=size))
def rv_op(cls, mu, sigma, nu, *, size=None, rng=None):
mu = pt.as_tensor(mu)
sigma = pt.as_tensor(sigma)
nu = pt.as_tensor(nu)

if rng is None:
rng = pytensor.shared(np.random.default_rng())

if size is not None:
size = pt.as_tensor(size, dtype="int64", ndim=1)

exgaussian = ExGaussianRV()
if rv_size_is_none(size):
size = pt.as_tensor(pt.broadcast_shape(mu, sigma, nu))

next_rng, normal_draws = normal(loc=mu, scale=sigma, size=size, rng=rng).owner.outputs
final_rng, exponential_draws = exponential(scale=nu, size=size, rng=next_rng).owner.outputs
draws = normal_draws + exponential_draws

return cls(inputs=[rng, size, mu, sigma, nu], outputs=[final_rng, draws])(
rng, size, mu, sigma, nu
)


class ExGaussian(Continuous):
@@ -2792,14 +2835,11 @@ class ExGaussian(Continuous):
Vol. 4, No. 1, pp 35-45.
"""

rv_op = exgaussian
rv_type = ExGaussianRV
rv_op = ExGaussianRV.rv_op

@classmethod
def dist(cls, mu=0.0, sigma=None, nu=None, *args, **kwargs):
mu = pt.as_tensor_variable(mu)
sigma = pt.as_tensor_variable(sigma)
nu = pt.as_tensor_variable(nu)

return super().dist([mu, sigma, nu], *args, **kwargs)

def support_point(rv, size, mu, sigma, nu):
@@ -3477,19 +3517,30 @@ def icdf(value, mu, s):
)


class LogitNormalRV(RandomVariable):
class LogitNormalRV(SymbolicRandomVariable):
name = "logit_normal"
ndim_supp = 0
ndims_params = [0, 0]
dtype = "floatX"
signature = "[rng],[size],(),()->[rng],()"
_print_name = ("logitNormal", "\\operatorname{logitNormal}")

@classmethod
def rng_fn(cls, rng, mu, sigma, size=None) -> np.ndarray:
return np.asarray(expit(stats.norm.rvs(loc=mu, scale=sigma, size=size, random_state=rng)))
def rv_op(cls, mu, sigma, *, size=None, rng=None):
mu = pt.as_tensor(mu)
sigma = pt.as_tensor(sigma)

if rng is None:
rng = pytensor.shared(np.random.default_rng())

if size is None:
size = pt.as_tensor(np.asarray([], dtype="int64"), ndim=1)

next_rng, normal_draws = normal(loc=mu, scale=sigma, size=size, rng=rng).owner.outputs
draws = pt.expit(normal_draws)

logit_normal = LogitNormalRV()
return cls(
inputs=[rng, size, mu, sigma],
outputs=[next_rng, draws],
)(rng, size, mu, sigma)


class LogitNormal(UnitContinuous):
@@ -3540,15 +3591,12 @@ class LogitNormal(UnitContinuous):
Defaults to 1.
"""

rv_op = logit_normal
rv_type = LogitNormalRV
rv_op = LogitNormalRV.rv_op

@classmethod
def dist(cls, mu=0, sigma=None, tau=None, **kwargs):
mu = pt.as_tensor_variable(mu)
tau, sigma = get_tau_sigma(tau=tau, sigma=sigma)
sigma = pt.as_tensor_variable(sigma)
tau = pt.as_tensor_variable(tau)

_, sigma = get_tau_sigma(tau=tau, sigma=sigma)
return super().dist([mu, sigma], **kwargs)

def support_point(rv, size, mu, sigma):
Diffs for the remaining two changed files are not shown.
