Variable tracking object which applies a bijector upon `convert_to_tensor`.
Inherits From: DeferredTensor
```python
tfp.substrates.jax.util.TransformedVariable(
    initial_value, bijector, dtype=None, name=None, **kwargs
)
```
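A minimal construction sketch (the values and name below are illustrative, not taken from the examples that follow): `initial_value` is specified in the constrained space, `bijector` defines the constraint, and `dtype` and `name` are optional.

```python
from tensorflow_probability.python.internal.backend.jax.compat import v2 as tf
import tensorflow_probability as tfp; tfp = tfp.substrates.jax
tfb = tfp.bijectors

# `initial_value` is given in the constrained (transformed) space; the
# underlying variable stores bijector.inverse(initial_value).
scale = tfp.util.TransformedVariable(
    initial_value=2., bijector=tfb.Softplus(), dtype=tf.float32, name='scale')
```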
Example
```python
from tensorflow_probability.python.internal.backend.jax.compat import v2 as tf
import tensorflow_probability as tfp; tfp = tfp.substrates.jax
tfb = tfp.bijectors

positive_variable = tfp.util.TransformedVariable(1., bijector=tfb.Exp())

positive_variable
# ==> <TransformedVariable: dtype=float32, shape=[], fn=exp>

# Note that the initial value corresponds to the transformed output.
tf.convert_to_tensor(positive_variable)
# ==> 1.

positive_variable.pretransformed_input
# ==> <tf.Variable 'Variable:0' shape=() dtype=float32, numpy=0.0>

# Operators work with `TransformedVariable`.
positive_variable + 1.
# ==> 2.

# It is also possible to assign values to a `TransformedVariable`.
with tf.control_dependencies([positive_variable.assign_add(2.)]):
  positive_variable
  # ==> 3.
```
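The same pattern extends to other bijectors and to non-scalar shapes. Below is a minimal sketch (assuming the standard `tfb.Sigmoid` and `tfb.Softplus` bijectors); the initial values are again given in the constrained space:

```python
from tensorflow_probability.python.internal.backend.jax.compat import v2 as tf
import tensorflow_probability as tfp; tfp = tfp.substrates.jax
tfb = tfp.bijectors

# A scalar constrained to the unit interval (0, 1).
probability = tfp.util.TransformedVariable(0.25, bijector=tfb.Sigmoid())

# A length-3 vector of strictly positive values.
positive_scales = tfp.util.TransformedVariable(
    [1., 2., 3.], bijector=tfb.Softplus())

tf.convert_to_tensor(probability)      # ==> 0.25
tf.convert_to_tensor(positive_scales)  # ==> [1., 2., 3.]
```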
A common use case for the `TransformedVariable` is to fit constrained
parameters. E.g.:
```python
from tensorflow_probability.python.internal.backend.jax.compat import v2 as tf
import tensorflow_probability as tfp; tfp = tfp.substrates.jax
tfb = tfp.bijectors
tfd = tfp.distributions

trainable_normal = tfd.Normal(
    loc=tf.Variable(0.),
    scale=tfp.util.TransformedVariable(1., bijector=tfb.Exp()))

trainable_normal.loc
# ==> <tf.Variable 'Variable:0' shape=() dtype=float32, numpy=0.0>

trainable_normal.scale
# ==> <TransformedVariable: dtype=float32, shape=[], fn=exp>

with tf.GradientTape() as tape:
  negloglik = -trainable_normal.log_prob(0.5)
g = tape.gradient(negloglik, trainable_normal.trainable_variables)
# ==> (-0.5, 0.75)

opt = tf.optimizers.Adam(learning_rate=0.05)
loss = tf.function(lambda: -trainable_normal.log_prob(0.5))
for _ in range(int(1e3)):
  opt.minimize(loss, trainable_normal.trainable_variables)
trainable_normal.mean()
# ==> 0.5
trainable_normal.stddev()
# ==> (approximately) 0.0075
```
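Because the optimizer only updates the unconstrained `pretransformed_input`, the constrained value stays in the bijector's range throughout training. As a minimal sketch of a variant (using the standard `tfb.Chain`, `tfb.Shift`, and `tfb.Softplus` bijectors), the scale can also be kept strictly above a small floor so it cannot collapse to exactly zero:

```python
from tensorflow_probability.python.internal.backend.jax.compat import v2 as tf
import tensorflow_probability as tfp; tfp = tfp.substrates.jax
tfb = tfp.bijectors
tfd = tfp.distributions

# forward(x) = softplus(x) + 1e-5, so the scale always stays >= 1e-5.
bounded_scale = tfp.util.TransformedVariable(
    1., bijector=tfb.Chain([tfb.Shift(1e-5), tfb.Softplus()]))
trainable_normal = tfd.Normal(loc=tf.Variable(0.), scale=bounded_scale)

# The constrained value is read via convert_to_tensor; the raw
# unconstrained variable is `bounded_scale.pretransformed_input`.
tf.convert_to_tensor(bounded_scale)  # ==> 1.
```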
Methods
assign
assign(
value, **_
)
assign_add
assign_add(
value, **_
)
assign_sub
assign_sub(
value, **_
)
numpy
numpy()
Returns (copy of) deferred values as a NumPy array or scalar.
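A minimal sketch of the assignment and `numpy` methods above (outputs shown are illustrative). Assignments are expressed in the constrained space; the bijector's inverse maps them back onto the underlying variable:

```python
from tensorflow_probability.python.internal.backend.jax.compat import v2 as tf
import tensorflow_probability as tfp; tfp = tfp.substrates.jax
tfb = tfp.bijectors

positive_variable = tfp.util.TransformedVariable(1., bijector=tfb.Exp())

positive_variable.assign(5.)      # Constrained value is now 5.
positive_variable.assign_sub(2.)  # Subtract in the constrained space: now 3.
positive_variable.numpy()         # ==> 3.0, a NumPy copy of the deferred value.
```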
set_shape
set_shape(
shape
)
Updates the shape of this pretransformed_input.
This method can be called multiple times, and will merge the given shape
with the current shape of this object. It can be used to provide additional
information about the shape of this object that cannot be inferred from the
graph alone.
Args |  
---|---
`shape` | A `TensorShape` representing the shape of this `pretransformed_input`, a `TensorShapeProto`, a list, a tuple, or None.

Raises |  
---|---
`ValueError` | If `shape` is not compatible with the current shape of this `pretransformed_input`.
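A minimal sketch of the merge behavior described above (the shapes are illustrative): `set_shape` accepts any shape compatible with what is already known about `pretransformed_input` and raises `ValueError` otherwise.

```python
from tensorflow_probability.python.internal.backend.jax.compat import v2 as tf
import tensorflow_probability as tfp; tfp = tfp.substrates.jax
tfb = tfp.bijectors

v = tfp.util.TransformedVariable([1., 2., 3.], bijector=tfb.Softplus())
v.set_shape([3])     # Compatible with the existing [3] shape; merge is a no-op.
v.set_shape([None])  # Also compatible: None leaves the dimension unconstrained.
# v.set_shape([4])   # Would raise ValueError: incompatible with shape [3].
```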
__abs__
__abs__(
*args, **kwargs
)
__add__
__add__(
*args, **kwargs
)
__and__
__and__(
*args, **kwargs
)
__array__
__array__(
dtype=None
)
__bool__
__bool__()
True if self else False
__floordiv__
__floordiv__(
*args, **kwargs
)
__ge__
__ge__(
*args, **kwargs
)
__getitem__
__getitem__(
*args, **kwargs
)
__gt__
__gt__(
*args, **kwargs
)
__invert__
__invert__(
*args, **kwargs
)
__iter__
__iter__(
*args, **kwargs
)
__le__
__le__(
*args, **kwargs
)
__lt__
__lt__(
*args, **kwargs
)
__matmul__
__matmul__(
*args, **kwargs
)
__mod__
__mod__(
*args, **kwargs
)
__mul__
__mul__(
*args, **kwargs
)
__neg__
__neg__(
*args, **kwargs
)
__or__
__or__(
*args, **kwargs
)
__pow__
__pow__(
*args, **kwargs
)
__radd__
__radd__(
*args, **kwargs
)
__rand__
__rand__(
*args, **kwargs
)
__rfloordiv__
__rfloordiv__(
*args, **kwargs
)
__rmatmul__
__rmatmul__(
*args, **kwargs
)
__rmod__
__rmod__(
*args, **kwargs
)
__rmul__
__rmul__(
*args, **kwargs
)
__ror__
__ror__(
*args, **kwargs
)
__rpow__
__rpow__(
*args, **kwargs
)
__rsub__
__rsub__(
*args, **kwargs
)
__rtruediv__
__rtruediv__(
*args, **kwargs
)
__rxor__
__rxor__(
*args, **kwargs
)
__sub__
__sub__(
*args, **kwargs
)
__truediv__
__truediv__(
*args, **kwargs
)
__xor__
__xor__(
*args, **kwargs
)