misc
Miscellaneous utility functions
DTypeAndDevice (tuple)
ErroneousResult
Representation of a caught error being returned as a result.
Source code in evotorch/tools/misc.py
class ErroneousResult:
"""
Representation of a caught error being returned as a result.
"""
def __init__(self, error: Exception):
self.error = error
def _to_string(self) -> str:
return f"<{type(self).__name__}, error: {self.error}>"
def __str__(self) -> str:
return self._to_string()
def __repr__(self) -> str:
return self._to_string()
def __bool__(self) -> bool:
return False
@staticmethod
def call(f, *args, **kwargs) -> Any:
"""
Call a function with the given arguments.
If the function raises an error, wrap the error in an ErroneousResult
object, and return that ErroneousResult object instead.
Returns:
The result of the function if there was no error,
or an ErroneousResult if there was an error.
"""
try:
result = f(*args, **kwargs)
except Exception as ex:
result = ErroneousResult(ex)
return result
call(f, *args, **kwargs)
staticmethod
Call a function with the given arguments. If the function raises an error, wrap the error in an ErroneousResult object, and return that ErroneousResult object instead.
Returns:
Type | Description |
---|---|
Any | The result of the function if there was no error, or an ErroneousResult if there was an error. |
Source code in evotorch/tools/misc.py
@staticmethod
def call(f, *args, **kwargs) -> Any:
"""
Call a function with the given arguments.
If the function raises an error, wrap the error in an ErroneousResult
object, and return that ErroneousResult object instead.
Returns:
The result of the function if there was no error,
or an ErroneousResult if there was an error.
"""
try:
result = f(*args, **kwargs)
except Exception as ex:
result = ErroneousResult(ex)
return result
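The following is a minimal usage sketch; the failing division is only an illustrative example:
from evotorch.tools.misc import ErroneousResult

def risky_division(a, b):
    return a / b

outcome = ErroneousResult.call(risky_division, 1.0, 0.0)
if not outcome:  # an ErroneousResult evaluates as False
    print("caught:", outcome.error)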
as_tensor(x, *, dtype=None, device=None)
Get the tensor counterpart of the given object x.
This function can be used to convert native Python objects to tensors:
my_tensor = as_tensor([1.0, 2.0, 3.0], dtype="float32")
One can also use this function to convert an existing tensor to another dtype:
my_new_tensor = as_tensor(my_tensor, dtype="float16")
This function can also be used for moving a tensor from one device to another:
my_gpu_tensor = as_tensor(my_tensor, device="cuda:0")
This function can also create ObjectArray instances when dtype is given as object or Any or "object" or "O":
my_objects = as_tensor([1, {"a": 3}], dtype=object)
Parameters:
Name | Type | Description | Default |
---|---|---|---|
x | Any | Any object to be converted to a tensor. | required |
dtype | Union[str, torch.dtype, numpy.dtype, Type] | Optionally a string (e.g. "float32") or a PyTorch dtype (e.g. torch.float32) or, for creating an ObjectArray, "object" (as string) or object or Any. If dtype is not specified, the default behavior of torch.as_tensor(...) will be used, that is, the dtype will be inferred from x. | None |
device | Union[str, torch.device] | The device in which the resulting tensor will be stored. | None |
Returns:
Type | Description |
---|---|
Iterable | The tensor counterpart of the given object x. |
Source code in evotorch/tools/misc.py
def as_tensor(x: Any, *, dtype: Optional[DType] = None, device: Optional[Device] = None) -> Iterable:
"""
Get the tensor counterpart of the given object `x`.
This function can be used to convert native Python objects to tensors:
my_tensor = as_tensor([1.0, 2.0, 3.0], dtype="float32")
One can also use this function to convert an existing tensor to another
dtype:
my_new_tensor = as_tensor(my_tensor, dtype="float16")
This function can also be used for moving a tensor from one device to
another:
my_gpu_tensor = as_tensor(my_tensor, device="cuda:0")
This function can also create ObjectArray instances when dtype is
given as `object` or `Any` or "object" or "O".
my_objects = as_tensor([1, {"a": 3}], dtype=object)
Args:
x: Any object to be converted to a tensor.
dtype: Optionally a string (e.g. "float32") or a PyTorch dtype
(e.g. torch.float32) or, for creating an `ObjectArray`,
"object" (as string) or `object` or `Any`.
If `dtype` is not specified, the default behavior of
`torch.as_tensor(...)` will be used, that is, dtype will be
inferred from `x`.
device: The device in which the resulting tensor will be stored.
Returns:
The tensor counterpart of the given object `x`.
"""
from .objectarray import ObjectArray
if (dtype is None) and isinstance(x, (torch.Tensor, ObjectArray)):
if (device is None) or (str(device) == "cpu"):
return x
else:
raise ValueError(
f"An ObjectArray cannot be moved into a device other than 'cpu'." f" The received device is: {device}."
)
elif is_dtype_object(dtype):
if (device is None) or (str(device) == "cpu"):
raise ValueError(
f"An ObjectArray cannot be created on a device other than 'cpu'." f" The received device is: {device}."
)
if isinstance(x, ObjectArray):
return x
else:
x = list(x)
n = len(x)
result = ObjectArray(n)
result[:] = x
return result
else:
dtype = to_torch_dtype(dtype)
return torch.as_tensor(x, dtype=dtype, device=device)
clip_tensor(x, lb=None, ub=None, ensure_copy=True)
Clip the values of a tensor with respect to the given bounds.
Parameters:
Name | Type | Description | Default |
---|---|---|---|
x | Tensor | The PyTorch tensor whose values will be clipped. | required |
lb | Union[float, Iterable] | Lower bounds, as a PyTorch tensor. Can be None if there are no lower bounds. | None |
ub | Union[float, Iterable] | Upper bounds, as a PyTorch tensor. Can be None if there are no upper bounds. | None |
ensure_copy | bool | If ensure_copy is True, the result will be a clipped copy of the original tensor. If ensure_copy is False, and both lb and ub are None, then there is nothing to do, so the result will be the original tensor itself, not a copy of it. | True |
Returns:
Type | Description |
---|---|
Tensor | The clipped tensor. |
Source code in evotorch/tools/misc.py
@torch.no_grad()
def clip_tensor(
x: torch.Tensor,
lb: Optional[Union[float, Iterable]] = None,
ub: Optional[Union[float, Iterable]] = None,
ensure_copy: bool = True,
) -> torch.Tensor:
"""
Clip the values of a tensor with respect to the given bounds.
Args:
x: The PyTorch tensor whose values will be clipped.
lb: Lower bounds, as a PyTorch tensor.
Can be None if there are no lower bounds.
ub: Upper bounds, as a PyTorch tensor.
Can be None if there are no upper bounds.
ensure_copy: If `ensure_copy` is True, the result will be
a clipped copy of the original tensor.
If `ensure_copy` is False, and both `lb` and `ub`
are None, then there is nothing to do, so, the result
will be the original tensor itself, not a copy of it.
Returns:
The clipped tensor.
"""
result = x
if lb is not None:
lb = torch.as_tensor(lb, dtype=x.dtype, device=x.device)
result = torch.max(result, lb)
if ub is not None:
ub = torch.as_tensor(ub, dtype=x.dtype, device=x.device)
result = torch.min(result, ub)
if ensure_copy and result is x:
result = x.clone()
return result
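A minimal usage sketch showing both the clipped copy and the no-copy behavior:
import torch
from evotorch.tools.misc import clip_tensor

x = torch.tensor([-2.0, 0.5, 3.0])
clipped = clip_tensor(x, lb=0.0, ub=1.0)       # tensor([0.0000, 0.5000, 1.0000])
untouched = clip_tensor(x, ensure_copy=False)  # no bounds given, so x itself is returned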
clone(x, *, memo=None)
Get a deep copy of the given object.
The cloning is done in no_grad mode.
Returns:
Type | Description |
---|---|
Any | The deep copy of the given object. |
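A minimal usage sketch:
import torch
from evotorch.tools.misc import clone

original = torch.rand(3)
copied = clone(original)
copied[0] = 99.0  # modifying the copy leaves the original untouched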
device_of(x)
Get the device of the given object.
Parameters:
Name | Type | Description | Default |
---|---|---|---|
x | Any | The object whose device is being queried. The object can be a PyTorch tensor, or a PyTorch module (in which case the device of the first parameter tensor will be returned), or an ObjectArray (in which case the returned device will be the cpu device), or any object with the attribute device. | required |
Returns:
Type | Description |
---|---|
Union[str, torch.device] | The device of the given object. |
Source code in evotorch/tools/misc.py
def device_of(x: Any) -> Device:
"""
Get the device of the given object.
Args:
x: The object whose device is being queried.
The object can be a PyTorch tensor, or a PyTorch module
(in which case the device of the first parameter tensor
will be returned), or an ObjectArray (in which case
the returned device will be the cpu device), or any object
with the attribute `device`.
Returns:
The device of the given object.
"""
if isinstance(x, nn.Module):
result = None
for param in x.parameters():
result = param.device
break
if result is None:
raise ValueError(f"Cannot determine the device of the module {x}")
return result
else:
return x.device
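A minimal usage sketch:
import torch
from torch import nn
from evotorch.tools.misc import device_of

print(device_of(torch.zeros(4)))   # cpu
print(device_of(nn.Linear(3, 2)))  # device of the module's first parameter (cpu here)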
device_of_container(container)
Get the device of the given container.
It is assumed that the given container stores PyTorch tensors from which the device information will be extracted. If the container contains only basic types like int, float, string, bool, or None, or if the container is empty, then the returned device will be None. If the container contains unrecognized objects, an error will be raised.
Parameters:
Name | Type | Description | Default |
---|---|---|---|
container | Any | A sequence or a dictionary of objects from which the device information will be extracted. | required |
Returns:
Type | Description |
---|---|
Optional[torch.device] | The device if available, None otherwise. |
Source code in evotorch/tools/misc.py
def device_of_container(container: Any) -> Optional[torch.device]:
"""
Get the device of the given container.
It is assumed that the given container stores PyTorch tensors from
which the device information will be extracted.
If the container contains only basic types like int, float, string,
bool, or None, or if the container is empty, then the returned device
will be None.
If the container contains unrecognized objects, an error will be
raised.
Args:
container: A sequence or a dictionary of objects from which the
device information will be extracted.
Returns:
The device if available, None otherwise.
"""
class result:
device: Optional[torch.device] = None
@classmethod
def update(cls, new_device: Optional[torch.device]):
if new_device is not None:
if cls.device is None:
cls.device = new_device
else:
if new_device != cls.device:
raise ValueError(f"Encountered tensors whose `device`s mismatch: {new_device}, {cls.device}")
if isinstance(container, torch.Tensor):
result.update(container.device)
elif (container is None) or isinstance(container, (Number, str, bytes, bool)):
pass
elif isinstance(container, Mapping):
for _, v in container.items():
result.update(device_of_container(v))
elif isinstance(container, Iterable):
for v in container:
result.update(device_of_container(v))
else:
raise TypeError(f"Encountered an object of unrecognized type: {type(container)}")
return result.device
dtype_of(x)
Get the dtype of the given object.
Parameters:
Name | Type | Description | Default |
---|---|---|---|
x | Any | The object whose dtype is being queried. The object can be a PyTorch tensor, or a PyTorch module (in which case the dtype of the first parameter tensor will be returned), or an ObjectArray (in which case the returned dtype will be object), or any object with the attribute dtype. | required |
Returns:
Type | Description |
---|---|
Union[str, torch.dtype, numpy.dtype, Type] | The dtype of the given object. |
Source code in evotorch/tools/misc.py
def dtype_of(x: Any) -> DType:
"""
Get the dtype of the given object.
Args:
x: The object whose dtype is being queried.
The object can be a PyTorch tensor, or a PyTorch module
(in which case the dtype of the first parameter tensor
will be returned), or an ObjectArray (in which case
the returned dtype will be `object`), or any object with
the attribute `dtype`.
Returns:
The dtype of the given object.
"""
if isinstance(x, nn.Module):
result = None
for param in x.parameters():
result = param.dtype
break
if result is None:
raise ValueError(f"Cannot determine the dtype of the module {x}")
return result
else:
return x.dtype
dtype_of_container(container)
Get the dtype of the given container.
It is assumed that the given container stores PyTorch tensors from which the dtype information will be extracted. If the container contains only basic types like int, float, string, bool, or None, or if the container is empty, then the returned dtype will be None. If the container contains unrecognized objects, an error will be raised.
Parameters:
Name | Type | Description | Default |
---|---|---|---|
container | Any | A sequence or a dictionary of objects from which the dtype information will be extracted. | required |
Returns:
Type | Description |
---|---|
Optional[torch.dtype] | The dtype if available, None otherwise. |
Source code in evotorch/tools/misc.py
def dtype_of_container(container: Any) -> Optional[torch.dtype]:
"""
Get the dtype of the given container.
It is assumed that the given container stores PyTorch tensors from
which the dtype information will be extracted.
If the container contains only basic types like int, float, string,
bool, or None, or if the container is empty, then the returned dtype
will be None.
If the container contains unrecognized objects, an error will be
raised.
Args:
container: A sequence or a dictionary of objects from which the
dtype information will be extracted.
Returns:
The dtype if available, None otherwise.
"""
class result:
dtype: Optional[torch.dtype] = None
@classmethod
def update(cls, new_dtype: Optional[torch.dtype]):
if new_dtype is not None:
if cls.dtype is None:
cls.dtype = new_dtype
else:
if new_dtype != cls.dtype:
raise ValueError(f"Encountered tensors whose `dtype`s mismatch: {new_dtype}, {cls.dtype}")
if isinstance(container, torch.Tensor):
result.update(container.dtype)
elif (container is None) or isinstance(container, (Number, str, bytes, bool)):
pass
elif isinstance(container, Mapping):
for _, v in container.items():
result.update(dtype_of_container(v))
elif isinstance(container, Iterable):
for v in container:
result.update(dtype_of_container(v))
else:
raise TypeError(f"Encountered an object of unrecognized type: {type(container)}")
return result.dtype
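A minimal usage sketch for both container helpers:
import torch
from evotorch.tools.misc import device_of_container, dtype_of_container

container = {"a": torch.zeros(3), "b": [torch.ones(2)]}
print(dtype_of_container(container))    # torch.float32
print(device_of_container(container))   # cpu
print(dtype_of_container([1, "text"]))  # None, because only basic types are inside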
empty_tensor_like(source, *, shape=None, length=None, dtype=None, device=None)
Make an empty tensor with attributes taken from a source tensor.
The source tensor can be a PyTorch tensor, or an ObjectArray.
Unlike torch.empty_like(...), this function allows one to redefine the shape and/or length of the new empty tensor.
Parameters:
Name | Type | Description | Default |
---|---|---|---|
source | Any | The source tensor whose shape, dtype, and device will be used by default for the new empty tensor. | required |
shape | Union[tuple, int] | If given as None (which is the default), then the shape of the source tensor will be used for the new empty tensor. If given as a tuple or a torch.Size instance, then the new empty tensor will be in this given shape instead. This argument cannot be used together with length. | None |
length | Optional[int] | If given as None (which is the default), then the length of the new empty tensor will be equal to the length of the source tensor (where length here means the size of the outermost dimension, i.e., what is returned by len(...)). If given as an integer, the length of the empty tensor will be this given length instead. This argument cannot be used together with shape. | None |
dtype | Union[str, torch.dtype, numpy.dtype, Type] | If given as None, the dtype of the new empty tensor will be the dtype of the source tensor. If given as a torch.dtype instance, then the dtype of the tensor will be this given dtype instead. | None |
device | Union[str, torch.device] | If given as None, the device of the new empty tensor will be the device of the source tensor. If given as a torch.device instance, then the device of the tensor will be this given device instead. | None |
Returns:
Type | Description |
---|---|
Any | The new empty tensor. |
Source code in evotorch/tools/misc.py
def empty_tensor_like(
source: Any,
*,
shape: Optional[Union[tuple, int]] = None,
length: Optional[int] = None,
dtype: Optional[DType] = None,
device: Optional[Device] = None,
) -> Any:
"""
Make an empty tensor with attributes taken from a source tensor.
The source tensor can be a PyTorch tensor, or an ObjectArray.
Unlike `torch.empty_like(...)`, this function allows one to redefine the
shape and/or length of the new empty tensor.
Args:
source: The source tensor whose shape, dtype, and device will be used
by default for the new empty tensor.
shape: If given as None (which is the default), then the shape of the
source tensor will be used for the new empty tensor.
If given as a tuple or a `torch.Size` instance, then the new empty
tensor will be in this given shape instead.
This argument cannot be used together with `length`.
length: If given as None (which is the default), then the length of
the new empty tensor will be equal to the length of the source
tensor (where length here means the size of the outermost
dimension, i.e., what is returned by `len(...)`).
If given as an integer, the length of the empty tensor will be
this given length instead.
This argument cannot be used together with `shape`.
dtype: If given as None, the dtype of the new empty tensor will be
the dtype of the source tensor.
If given as a `torch.dtype` instance, then the dtype of the
tensor will be this given dtype instead.
device: If given as None, the device of the new empty tensor will be
the device of the source tensor.
If given as a `torch.device` instance, then the device of the
tensor will be this given device instead.
Returns:
The new empty tensor.
"""
from .objectarray import ObjectArray
if isinstance(source, ObjectArray):
if length is not None and shape is None:
n = int(length)
elif shape is not None and length is None:
if isinstance(shape, Iterable):
if len(shape) != 1:
raise ValueError(
f"An ObjectArray must always be 1-dimensional."
f" Therefore, this given shape is incompatible: {shape}"
)
n = int(shape[0])
else:
n = int(shape)
elif length is None and shape is None:
n = len(source)
else:
raise ValueError("`length` and `shape` cannot be used together")
if device is not None:
if str(device) != "cpu":
raise ValueError(
f"An ObjectArray can only be allocated on cpu. However, the specified `device` is: {device}."
)
if dtype is not None:
if not is_dtype_object(dtype):
raise ValueError(
f"The dtype of an ObjectArray can only be `object`. However, the specified `dtype` is: {dtype}."
)
return ObjectArray(n)
elif isinstance(source, torch.Tensor):
if length is not None:
if shape is not None:
raise ValueError("`length` and `shape` cannot be used together")
if source.ndim == 0:
raise ValueError("`length` can only be used when the source tensor is at least 1-dimensional")
newshape = [int(length)]
newshape.extend(source.shape[1:])
shape = tuple(newshape)
if not ((dtype is None) or isinstance(dtype, torch.dtype)):
dtype = to_torch_dtype(dtype)
return torch.empty(
source.shape if shape is None else shape,
dtype=(source.dtype if dtype is None else dtype),
device=(source.device if device is None else device),
)
else:
raise TypeError(f"The source tensor is of an unrecognized type: {type(source)}")
ensure_ray()
Ensure that the ray parallelization engine is initialized. If ray is already initialized, this function does nothing.
ensure_tensor_length_and_dtype(t, length, dtype, about=None, *, allow_scalar=False, device=None)
Return the given sequence as a tensor while also confirming its length, dtype, and device. If the given object is already a tensor conforming to the desired length, dtype, and device, the object will be returned as it is (there will be no copying).
Parameters:
Name | Type | Description | Default |
---|---|---|---|
t | Any | The tensor, or a sequence which is convertible to a tensor. | required |
length | int | The length to which the tensor is expected to conform. | required |
dtype | Union[str, torch.dtype, numpy.dtype, Type] | The dtype to which the tensor is expected to conform. | required |
about | Optional[str] | The prefix for the error message. Can be left as None. | None |
allow_scalar | bool | Whether or not to accept scalars in addition to vectors of the desired length. If allow_scalar is False, then scalars will be converted to sequences of the desired length (the sequence containing the same scalar, repeated). If allow_scalar is True, then the scalar itself will be converted to a PyTorch scalar and returned. | False |
device | Union[str, torch.device] | The device in which the sequence is to be stored. If the given sequence is on a different device than the desired device, a copy on the correct device will be made. If device is None, the default behavior of torch.tensor(...) will be used, that is: if t is already a tensor, the result will be on the same device, otherwise, the result will be on the cpu. | None |
Returns:
Type | Description |
---|---|
| The sequence whose correctness in terms of length, dtype, and device is ensured. |
Exceptions:
Type | Description |
---|---|
ValueError | if there is a length mismatch. |
Source code in evotorch/tools/misc.py
@torch.no_grad()
def ensure_tensor_length_and_dtype(
t: Any,
length: int,
dtype: DType,
about: Optional[str] = None,
*,
allow_scalar: bool = False,
device: Optional[Device] = None,
):
"""
Return the given sequence as a tensor while also confirming its
length, dtype, and device.
If the given object is already a tensor conforming to the desired
length, dtype, and device, the object will be returned as it is
(there will be no copying).
Args:
t: The tensor, or a sequence which is convertible to a tensor.
length: The length to which the tensor is expected to conform.
dtype: The dtype to which the tensor is expected to conform.
about: The prefix for the error message. Can be left as None.
allow_scalar: Whether or not to accept scalars in addition
to vector of the desired length.
If `allow_scalar` is False, then scalars will be converted
to sequences of the desired length. The sequence will contain
the same scalar, repeated.
If `allow_scalar` is True, then the scalar itself will be
converted to a PyTorch scalar, and then will be returned.
device: The device in which the sequence is to be stored.
If the given sequence is on a different device than the
desired device, a copy on the correct device will be made.
If device is None, the default behavior of `torch.tensor(...)`
will be used, that is: if `t` is already a tensor, the result
will be on the same device, otherwise, the result will be on
the cpu.
Returns:
The sequence whose correctness in terms of length, dtype, and
device is ensured.
Raises:
ValueError: if there is a length mismatch.
"""
device_args = {}
if device is not None:
device_args["device"] = device
t = as_tensor(t, dtype=dtype, **device_args)
if t.ndim == 0:
if allow_scalar:
return t
else:
return t.repeat(length)
else:
if t.ndim != 1 or len(t) != length:
if about is not None:
err_prefix = about + ": "
else:
err_prefix = ""
raise ValueError(
f"{err_prefix}Expected a 1-dimensional tensor of length {length}, but got a tensor with shape: {t.shape}"
)
return t
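A minimal usage sketch:
from evotorch.tools.misc import ensure_tensor_length_and_dtype

v = ensure_tensor_length_and_dtype([1, 2, 3], length=3, dtype="float32")
# v is tensor([1., 2., 3.])
s = ensure_tensor_length_and_dtype(0.5, length=4, dtype="float32")
# the scalar is repeated: tensor([0.5000, 0.5000, 0.5000, 0.5000])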
expect_none(msg_prefix, **kwargs)
Expect the values associated with the given keyword arguments to be None. If not, raise error.
Parameters:
Name | Type | Description | Default |
---|---|---|---|
msg_prefix | str | Prefix of the error message. | required |
kwargs | | Keyword arguments whose values are expected to be None. | {} |
Exceptions:
Type | Description |
---|---|
ValueError | if at least one of the keyword arguments has a value other than None. |
Source code in evotorch/tools/misc.py
def expect_none(msg_prefix: str, **kwargs):
"""
Expect the values associated with the given keyword arguments
to be None. If not, raise error.
Args:
msg_prefix: Prefix of the error message.
kwargs: Keyword arguments whose values are expected to be None.
Raises:
ValueError: if at least one of the keyword arguments has a value
other than None.
"""
for k, v in kwargs.items():
if v is not None:
raise ValueError(f"{msg_prefix}: expected `{k}` as None, however, it was found to be {repr(v)}")
is_bool(x)
Return True if x represents a bool.
Parameters:
Name | Type | Description | Default |
---|---|---|---|
x | Any | An object whose type is being queried. | required |
Returns:
Type | Description |
---|---|
bool | True if x is a bool; False otherwise. |
Source code in evotorch/tools/misc.py
def is_bool(x: Any) -> bool:
"""
Return True if `x` represents a bool.
Args:
x: An object whose type is being queried.
Returns:
True if `x` is a bool; False otherwise.
"""
if isinstance(x, (bool, np.bool_)):
return True
elif isinstance(x, (torch.Tensor, np.ndarray)):
if x.ndim > 0:
return False
else:
return is_dtype_bool(x.dtype)
else:
return False
is_bool_vector(x)
Return True if x is a vector consisting of bools.
Parameters:
Name | Type | Description | Default |
---|---|---|---|
x | Any | An object whose elements' types are to be queried. | required |
Returns:
Type | Description |
---|---|
| True if the elements of x are bools; False otherwise. |
Source code in evotorch/tools/misc.py
def is_bool_vector(x: Any):
"""
Return True if `x` is a vector consisting of bools.
Args:
x: An object whose elements' types are to be queried.
Returns:
True if the elements of `x` are bools; False otherwise.
"""
if isinstance(x, (torch.Tensor, np.ndarray)):
if x.ndim != 1:
return False
else:
return is_dtype_bool(x.dtype)
elif isinstance(x, Iterable):
for item in x:
if not is_bool(item):
return False
return True
else:
return False
is_dtype_bool(t)
Return True if the given dtype is a bool type.
Parameters:
Name | Type | Description | Default |
---|---|---|---|
t | Union[str, torch.dtype, numpy.dtype, Type] | The dtype, which can be a dtype string, a numpy dtype, or a PyTorch dtype. | required |
Returns:
Type | Description |
---|---|
bool | True if t is a bool type; False otherwise. |
is_dtype_float(t)
Return True if the given dtype is a float type.
Parameters:
Name | Type | Description | Default |
---|---|---|---|
t | Union[str, torch.dtype, numpy.dtype, Type] | The dtype, which can be a dtype string, a numpy dtype, or a PyTorch dtype. | required |
Returns:
Type | Description |
---|---|
bool | True if t is a float type; False otherwise. |
is_dtype_integer(t)
Return True if the given dtype is an integer type.
Parameters:
Name | Type | Description | Default |
---|---|---|---|
t | Union[str, torch.dtype, numpy.dtype, Type] | The dtype, which can be a dtype string, a numpy dtype, or a PyTorch dtype. | required |
Returns:
Type | Description |
---|---|
bool | True if t is an integer type; False otherwise. |
Source code in evotorch/tools/misc.py
def is_dtype_integer(t: DType) -> bool:
"""
Return True if the given dtype is an integer type.
Args:
t: The dtype, which can be a dtype string, a numpy dtype,
or a PyTorch dtype.
Returns:
True if t is an integer type; False otherwise.
"""
t: np.dtype = to_numpy_dtype(t)
return t.kind.startswith("u") or t.kind.startswith("i")
is_dtype_object(dtype)
Return True if the given dtype is object or Any.
Returns:
Type | Description |
---|---|
bool | True if the given dtype is object or Any; False otherwise. |
Source code in evotorch/tools/misc.py
def is_dtype_object(dtype: DType) -> bool:
"""
Return True if the given dtype is `object` or `Any`.
Returns:
True if the given dtype is `object` or `Any`; False otherwise.
"""
if isinstance(dtype, str):
return dtype in ("object", "Any", "O")
elif dtype is object or dtype is Any:
return True
else:
return False
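A minimal usage sketch for the dtype predicates:
import torch
from evotorch.tools.misc import is_dtype_bool, is_dtype_float, is_dtype_integer, is_dtype_object

assert is_dtype_float(torch.float32)
assert is_dtype_integer("int64")
assert is_dtype_bool(torch.bool)
assert is_dtype_object(object) and is_dtype_object("O")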
is_dtype_real(t)
Return True if the given dtype represents real numbers (i.e. if dtype is an integer type or is a float type).
Parameters:
Name | Type | Description | Default |
---|---|---|---|
t | Union[str, torch.dtype, numpy.dtype, Type] | The dtype, which can be a dtype string, a numpy dtype, or a PyTorch dtype. | required |
Returns:
Type | Description |
---|---|
bool | True if t represents a real numbers type; False otherwise. |
Source code in evotorch/tools/misc.py
def is_dtype_real(t: DType) -> bool:
"""
Return True if the given dtype represents real numbers
(i.e. if dtype is an integer type or is a float type).
Args:
t: The dtype, which can be a dtype string, a numpy dtype,
or a PyTorch dtype.
Returns:
True if t represents a real numbers type; False otherwise.
"""
return is_dtype_float(t) or is_dtype_integer(t)
is_integer(x)
Return True if x is an integer.
Note that this function does NOT consider booleans as integers.
Parameters:
Name | Type | Description | Default |
---|---|---|---|
x | Any | An object whose type is being queried. | required |
Returns:
Type | Description |
---|---|
bool | True if x is an integer; False otherwise. |
Source code in evotorch/tools/misc.py
def is_integer(x: Any) -> bool:
"""
Return True if `x` is an integer.
Note that this function does NOT consider booleans as integers.
Args:
x: An object whose type is being queried.
Returns:
True if `x` is an integer; False otherwise.
"""
if is_bool(x):
return False
elif isinstance(x, Integral):
return True
elif isinstance(x, (torch.Tensor, np.ndarray)):
if x.ndim > 0:
return False
else:
return is_dtype_integer(x.dtype)
else:
return False
is_integer_vector(x)
Return True if x is a vector consisting of integers.
Parameters:
Name | Type | Description | Default |
---|---|---|---|
x | Any | An object whose elements' types are to be queried. | required |
Returns:
Type | Description |
---|---|
| True if the elements of x are integers; False otherwise. |
Source code in evotorch/tools/misc.py
def is_integer_vector(x: Any):
"""
Return True if `x` is a vector consisting of integers.
Args:
x: An object whose elements' types are to be queried.
Returns:
True if the elements of `x` are integers; False otherwise.
"""
if isinstance(x, (torch.Tensor, np.ndarray)):
if x.ndim != 1:
return False
else:
return is_dtype_integer(x.dtype)
elif isinstance(x, Iterable):
for item in x:
if not is_integer(item):
return False
return True
else:
return False
is_real(x)
Return True if x is a real number.
Note that this function does NOT consider booleans as real numbers.
Parameters:
Name | Type | Description | Default |
---|---|---|---|
x | Any | An object whose type is being queried. | required |
Returns:
Type | Description |
---|---|
bool | True if x is a real number; False otherwise. |
Source code in evotorch/tools/misc.py
def is_real(x: Any) -> bool:
"""
Return True if `x` is a real number.
Note that this function does NOT consider booleans as real numbers.
Args:
x: An object whose type is being queried.
Returns:
True if `x` is a real number; False otherwise.
"""
if is_bool(x):
return False
elif isinstance(x, Real):
return True
elif isinstance(x, (torch.Tensor, np.ndarray)):
if x.ndim > 0:
return False
else:
return is_dtype_real(x.dtype)
else:
return False
is_real_vector(x)
Return True if x is a vector consisting of real numbers.
Parameters:
Name | Type | Description | Default |
---|---|---|---|
x | Any | An object whose elements' types are to be queried. | required |
Returns:
Type | Description |
---|---|
| True if the elements of x are real numbers; False otherwise. |
Source code in evotorch/tools/misc.py
def is_real_vector(x: Any):
"""
Return True if `x` is a vector consisting of real numbers.
Args:
x: An object whose elements' types are to be queried.
Returns:
True if the elements of `x` are real numbers; False otherwise.
"""
if isinstance(x, (torch.Tensor, np.ndarray)):
if x.ndim != 1:
return False
else:
return is_dtype_real(x.dtype)
elif isinstance(x, Iterable):
for item in x:
if not is_real(item):
return False
return True
else:
return False
is_sequence(x)
Return True if x is a sequence.
Note that this function considers str and bytes as scalars, not as sequences.
Parameters:
Name | Type | Description | Default |
---|---|---|---|
x | Any | The object whose sequential nature is being queried. | required |
Returns:
Type | Description |
---|---|
bool | True if x is a sequence; False otherwise. |
Source code in evotorch/tools/misc.py
def is_sequence(x: Any) -> bool:
"""
Return True if `x` is a sequence.
Note that this function considers `str` and `bytes` as scalars,
not as sequences.
Args:
x: The object whose sequential nature is being queried.
Returns:
True if `x` is a sequence; False otherwise.
"""
if isinstance(x, (str, bytes)):
return False
elif isinstance(x, (np.ndarray, torch.Tensor)):
return x.ndim > 0
elif isinstance(x, Iterable):
return True
else:
return False
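A minimal usage sketch for the scalar and vector predicates:
import numpy as np
import torch
from evotorch.tools.misc import is_bool, is_integer, is_real, is_real_vector, is_sequence

assert is_integer(np.int64(3)) and not is_integer(True)   # booleans do not count as integers
assert is_real(2.5) and not is_real(False)
assert is_real_vector(torch.tensor([1.0, 2.0]))
assert is_sequence([1, 2, 3]) and not is_sequence("abc")   # str is treated as a scalar
assert is_bool(np.bool_(True))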
is_tensor_on_cpu(tensor)
make_I(size=None, out=None, dtype=None, device=None)
Make a new identity matrix (I), or change an existing tensor into one.
The following example creates a 3x3 identity matrix:
identity_matrix = make_I(3, dtype="float32")
The following example changes an already existing square matrix such that its values will store an identity matrix:
make_I(out=existing_tensor)
Parameters:
Name | Type | Description | Default |
---|---|---|---|
size | Optional[int] | A single integer specifying the length of the target square matrix. In this context, "length" means both rowwise length and columnwise length, since the target is a square matrix. Note that, if the user wishes to fill an existing tensor with identity values, then size is expected to be left as None. | None |
out | Optional[torch.Tensor] | Optionally, the existing tensor whose values will be changed so that they represent an identity matrix. If an out tensor is given, then size is expected as None. | None |
dtype | Union[str, torch.dtype, numpy.dtype, Type] | Optionally a string (e.g. "float32") or a PyTorch dtype (e.g. torch.float32). If dtype is not specified, the default choice of torch.empty(...) is used, that is, torch.float32. If an out tensor is specified, then dtype is expected as None. | None |
device | Union[str, torch.device] | The device in which the new tensor will be stored. If not specified, "cpu" will be used. If an out tensor is specified, then device is expected as None. | None |
Returns:
Type | Description |
---|---|
Tensor | The created or modified tensor after placing the I matrix values. |
Source code in evotorch/tools/misc.py
def make_I(
size: Optional[int] = None,
out: Optional[torch.Tensor] = None,
dtype: Optional[DType] = None,
device: Optional[Device] = None,
) -> torch.Tensor:
"""
Make a new identity matrix (I), or change an existing tensor into one.
The following example creates a 3x3 identity matrix:
identity_matrix = make_I(3, dtype="float32")
The following example changes an already existing square matrix such that
its values will store an identity matrix:
make_I(out=existing_tensor)
Args:
size: A single integer specifying the length of the target square
matrix. In this context, "length" means both rowwise length
and columnwise length, since the target is a square matrix.
Note that, if the user wishes to fill an existing tensor with
identity values, then `size` is expected to be left as None.
out: Optionally, the existing tensor whose values will be changed
so that they represent an identity matrix.
If an `out` tensor is given, then `size` is expected as None.
dtype: Optionally a string (e.g. "float32") or a PyTorch dtype
(e.g. torch.float32).
If `dtype` is not specified, the default choice of
`torch.empty(...)` is used, that is, `torch.float32`.
If an `out` tensor is specified, then `dtype` is expected
as None.
device: The device in which the new tensor will be stored.
If not specified, "cpu" will be used.
If an `out` tensor is specified, then `device` is expected
as None.
Returns:
The created or modified tensor after placing the I matrix values
"""
if size is None:
if out is None:
raise ValueError(
" When the `size` argument is missing, `make_I(...)` expects an `out` tensor."
" However, the `out` argument was received as None."
)
size = tuple()
else:
n = int(size)
size = (n, n)
out = _out_tensor(*size, out=out, dtype=dtype, device=device)
out.zero_()
out.fill_diagonal_(1)
return out
make_empty(*size, dtype=None, device=None)
Make an empty tensor.
Parameters:
Name | Type | Description | Default |
---|---|---|---|
size | Union[int, torch.Size] | Shape of the empty tensor to be created, expected as multiple positional arguments of integers, or as a single positional argument containing a tuple of integers. Note that when the user wishes to create an ObjectArray (i.e. when dtype is given as object), then the size is expected as a single integer, or as a single-element tuple containing an integer (because ObjectArray can only be one-dimensional). | () |
dtype | Union[str, torch.dtype, numpy.dtype, Type] | Optionally a string (e.g. "float32") or a PyTorch dtype (e.g. torch.float32) or, for creating an ObjectArray, "object" (as string) or object or Any. If dtype is not specified, the default choice of torch.empty(...) is used, that is, torch.float32. | None |
device | Union[str, torch.device] | The device in which the new empty tensor will be stored. If not specified, "cpu" will be used. | None |
Returns:
Type | Description |
---|---|
Iterable | The new empty tensor, which can be a PyTorch tensor or an ObjectArray. |
Source code in evotorch/tools/misc.py
def make_empty(
*size: Size,
dtype: Optional[DType] = None,
device: Optional[Device] = None,
) -> Iterable:
"""
Make an empty tensor.
Args:
size: Shape of the empty tensor to be created.
expected as multiple positional arguments of integers,
or as a single positional argument containing a tuple of
integers.
Note that when the user wishes to create an `ObjectArray`
(i.e. when `dtype` is given as `object`), then the size
is expected as a single integer, or as a single-element
tuple containing an integer (because `ObjectArray` can only
be one-dimensional).
dtype: Optionally a string (e.g. "float32") or a PyTorch dtype
(e.g. torch.float32) or, for creating an `ObjectArray`,
"object" (as string) or `object` or `Any`.
If `dtype` is not specified, the default choice of
`torch.empty(...)` is used, that is, `torch.float32`.
device: The device in which the new empty tensor will be stored.
If not specified, "cpu" will be used.
Returns:
The new empty tensor, which can be a PyTorch tensor or an
`ObjectArray`.
"""
from .objectarray import ObjectArray
if (dtype is not None) and is_dtype_object(dtype):
if (device is None) or (str(device) == "cpu"):
if len(size) == 1:
size = size[0]
return ObjectArray(size)
else:
raise ValueError(
f"Invalid device for ObjectArray: {repr(device)}. Note: an ObjectArray can only be stored on 'cpu'."
)
else:
kwargs = {}
if dtype is not None:
kwargs["dtype"] = to_torch_dtype(dtype)
if device is not None:
kwargs["device"] = device
return torch.empty(*size, **kwargs)
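A minimal usage sketch:
from evotorch.tools.misc import make_empty

buffer = make_empty(10, 3, dtype="float32")  # uninitialized 10x3 float32 tensor on cpu
objects = make_empty(5, dtype=object)        # an ObjectArray of length 5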
make_gaussian(*size, center=None, stdev=None, symmetric=False, out=None, dtype=None, device=None, generator=None)
Make a new or existing tensor filled by Gaussian distributed values. This function can work only with float dtypes.
Parameters:
Name | Type | Description | Default |
---|---|---|---|
size | Union[int, torch.Size] | Size of the new tensor to be filled with Gaussian distributed values. This can be given as multiple positional arguments, each such positional argument being an integer, or as a single positional argument of a tuple, the tuple containing multiple integers. Note that, if the user wishes to fill an existing tensor instead, then no positional argument is expected. | () |
center | Union[float, Iterable[float], torch.Tensor] | Center point (i.e. mean) of the Gaussian distribution. Can be a scalar, or a tensor. If not specified, the center point will be taken as 0. Note that, if one specifies center, then stdev is also expected to be explicitly specified. | None |
stdev | Union[float, Iterable[float], torch.Tensor] | Standard deviation for the Gaussian distributed values. Can be a scalar, or a tensor. If not specified, the standard deviation will be taken as 1. Note that, if one specifies stdev, then center is also expected to be explicitly specified. | None |
symmetric | bool | Whether or not the values should be sampled in a symmetric (i.e. antithetic) manner. The default is False. | False |
out | Optional[torch.Tensor] | Optionally, the tensor to be filled by Gaussian distributed values. If an out tensor is given, then no size argument is expected. | None |
dtype | Union[str, torch.dtype, numpy.dtype, Type] | Optionally a string (e.g. "float32") or a PyTorch dtype (e.g. torch.float32). If dtype is not specified, the default choice of torch.empty(...) is used, that is, torch.float32. If an out tensor is specified, then dtype is expected as None. | None |
device | Union[str, torch.device] | The device in which the new tensor will be stored. If not specified, "cpu" will be used. If an out tensor is specified, then device is expected as None. | None |
generator | Any | Pseudo-random number generator to be used when sampling the values. Can be a torch.Generator, or an object with a generator attribute (such as Problem). If left as None, the global generator of PyTorch will be used. | None |
Returns:
Type | Description |
---|---|
Tensor | The created or modified tensor after placing the Gaussian distributed values. |
Source code in evotorch/tools/misc.py
def make_gaussian(
*size: Size,
center: Optional[RealOrVector] = None,
stdev: Optional[RealOrVector] = None,
symmetric: bool = False,
out: Optional[torch.Tensor] = None,
dtype: Optional[DType] = None,
device: Optional[Device] = None,
generator: Any = None,
) -> torch.Tensor:
"""
Make a new or existing tensor filled by Gaussian distributed values.
This function can work only with float dtypes.
Args:
size: Size of the new tensor to be filled with Gaussian distributed
values. This can be given as multiple positional arguments, each
such positional argument being an integer, or as a single
positional argument of a tuple, the tuple containing multiple
integers. Note that, if the user wishes to fill an existing
tensor instead, then no positional argument is expected.
center: Center point (i.e. mean) of the Gaussian distribution.
Can be a scalar, or a tensor.
If not specified, the center point will be taken as 0.
Note that, if one specifies `center`, then `stdev` is also
expected to be explicitly specified.
stdev: Standard deviation for the Gaussian distributed values.
Can be a scalar, or a tensor.
If not specified, the standard deviation will be taken as 1.
Note that, if one specifies `stdev`, then `center` is also
expected to be explicitly specified.
symmetric: Whether or not the values should be sampled in a
symmetric (i.e. antithetic) manner.
The default is False.
out: Optionally, the tensor to be filled by Gaussian distributed
values. If an `out` tensor is given, then no `size` argument is
expected.
dtype: Optionally a string (e.g. "float32") or a PyTorch dtype
(e.g. torch.float32).
If `dtype` is not specified, the default choice of
`torch.empty(...)` is used, that is, `torch.float32`.
If an `out` tensor is specified, then `dtype` is expected
as None.
device: The device in which the new tensor will be stored.
If not specified, "cpu" will be used.
If an `out` tensor is specified, then `device` is expected
as None.
generator: Pseudo-random number generator to be used when sampling
the values. Can be a `torch.Generator`, or an object with
a `generator` attribute (such as `Problem`).
If left as None, the global generator of PyTorch will be used.
Returns:
The created or modified tensor after placing the Gaussian
distributed values.
"""
scalar_requested = _scalar_requested(*size)
if scalar_requested:
size = (1,)
out = _out_tensor(*size, out=out, dtype=dtype, device=device)
gen_kwargs = _generator_kwargs(generator)
if symmetric:
leftmost_dim = out.shape[0]
if (leftmost_dim % 2) != 0:
raise ValueError(
f"Symmetric sampling cannot be done if the leftmost dimension of the target tensor is odd."
f" The shape of the target tensor is: {repr(out.shape)}."
)
out[0::2, ...].normal_(**gen_kwargs)
out[1::2, ...] = out[0::2, ...]
out[1::2, ...] *= -1
else:
out.normal_(**gen_kwargs)
if (center is None) and (stdev is None):
pass # do nothing
elif (center is not None) and (stdev is not None):
stdev = torch.as_tensor(stdev, dtype=out.dtype, device=out.device)
out *= stdev
center = torch.as_tensor(center, dtype=out.dtype, device=out.device)
out += center
else:
raise ValueError(
f"Please either specify none of `stdev` and `center`, or both of them."
f" Currently, `center` is {center}"
f" and `stdev` is {stdev}."
)
if scalar_requested:
out = out[0]
return out
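A minimal usage sketch:
from evotorch.tools.misc import make_gaussian

samples = make_gaussian(10, 4, center=0.0, stdev=2.0, dtype="float32")
antithetic = make_gaussian(6, 4, symmetric=True)  # rows come in mirrored pairs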
make_nan(*size, out=None, dtype=None, device=None)
Make a new tensor filled with NaN, or fill an existing tensor with NaN.
The following example creates a float32 tensor filled with NaN values, of shape (3, 5):
nan_values = make_nan(3, 5, dtype="float32")
The following example fills an existing tensor with NaNs.
make_nan(out=existing_tensor)
Parameters:
Name | Type | Description | Default |
---|---|---|---|
size | Union[int, torch.Size] | Size of the new tensor to be filled with NaNs. This can be given as multiple positional arguments, each such positional argument being an integer, or as a single positional argument of a tuple, the tuple containing multiple integers. Note that, if the user wishes to fill an existing tensor with NaN values, then no positional argument is expected. | () |
out | Optional[torch.Tensor] | Optionally, the tensor to be filled by NaN values. If an out tensor is given, then no size argument is expected. | None |
dtype | Union[str, torch.dtype, numpy.dtype, Type] | Optionally a string (e.g. "float32") or a PyTorch dtype (e.g. torch.float32). If dtype is not specified, the default choice of torch.empty(...) is used, that is, torch.float32. If an out tensor is specified, then dtype is expected as None. | None |
device | Union[str, torch.device] | The device in which the new tensor will be stored. If not specified, "cpu" will be used. If an out tensor is specified, then device is expected as None. | None |
Returns:
Type | Description |
---|---|
Tensor | The created or modified tensor after placing NaN values. |
Source code in evotorch/tools/misc.py
def make_nan(
*size: Size,
out: Optional[torch.Tensor] = None,
dtype: Optional[DType] = None,
device: Optional[Device] = None,
) -> torch.Tensor:
"""
Make a new tensor filled with NaN, or fill an existing tensor with NaN.
The following example creates a float32 tensor filled with NaN values,
of shape (3, 5):
nan_values = make_nan(3, 5, dtype="float32")
The following example fills an existing tensor with NaNs.
make_nan(out=existing_tensor)
Args:
size: Size of the new tensor to be filled with NaNs.
This can be given as multiple positional arguments, each such
positional argument being an integer, or as a single positional
argument of a tuple, the tuple containing multiple integers.
Note that, if the user wishes to fill an existing tensor with
NaN values, then no positional argument is expected.
out: Optionally, the tensor to be filled by NaN values.
If an `out` tensor is given, then no `size` argument is expected.
dtype: Optionally a string (e.g. "float32") or a PyTorch dtype
(e.g. torch.float32).
If `dtype` is not specified, the default choice of
`torch.empty(...)` is used, that is, `torch.float32`.
If an `out` tensor is specified, then `dtype` is expected
as None.
device: The device in which the new tensor will be stored.
If not specified, "cpu" will be used.
If an `out` tensor is specified, then `device` is expected
as None.
Returns:
The created or modified tensor after placing NaN values.
"""
if _scalar_requested(*size):
return _scalar_tensor(float("nan"), out=out, dtype=dtype, device=device)
else:
out = _out_tensor(*size, out=out, dtype=dtype, device=device)
out[:] = float("nan")
return out
make_ones(*size, out=None, dtype=None, device=None)
Make a new tensor filled with 1, or fill an existing tensor with 1.
The following example creates a float32 tensor filled with 1 values, of shape (3, 5):
zero_values = make_ones(3, 5, dtype="float32")
The following example fills an existing tensor with 1s:
make_ones(out=existing_tensor)
Parameters:
Name | Type | Description | Default |
---|---|---|---|
size | Union[int, torch.Size] | Size of the new tensor to be filled with 1. This can be given as multiple positional arguments, each such positional argument being an integer, or as a single positional argument of a tuple, the tuple containing multiple integers. Note that, if the user wishes to fill an existing tensor with 1 values, then no positional argument is expected. | () |
out | Optional[torch.Tensor] | Optionally, the tensor to be filled by 1 values. If an out tensor is given, then no size argument is expected. | None |
dtype | Union[str, torch.dtype, numpy.dtype, Type] | Optionally a string (e.g. "float32") or a PyTorch dtype (e.g. torch.float32). If dtype is not specified, the default choice of torch.empty(...) is used, that is, torch.float32. If an out tensor is specified, then dtype is expected as None. | None |
device | Union[str, torch.device] | The device in which the new tensor will be stored. If not specified, "cpu" will be used. If an out tensor is specified, then device is expected as None. | None |
Returns:
Type | Description |
---|---|
Tensor | The created or modified tensor after placing 1 values. |
Source code in evotorch/tools/misc.py
def make_ones(
*size: Size,
out: Optional[torch.Tensor] = None,
dtype: Optional[DType] = None,
device: Optional[Device] = None,
) -> torch.Tensor:
"""
Make a new tensor filled with 1, or fill an existing tensor with 1.
The following example creates a float32 tensor filled with 1 values,
of shape (3, 5):
zero_values = make_ones(3, 5, dtype="float32")
The following example fills an existing tensor with 1s:
make_ones(out=existing_tensor)
Args:
size: Size of the new tensor to be filled with 1.
This can be given as multiple positional arguments, each such
positional argument being an integer, or as a single positional
argument of a tuple, the tuple containing multiple integers.
Note that, if the user wishes to fill an existing tensor with
1 values, then no positional argument is expected.
out: Optionally, the tensor to be filled by 1 values.
If an `out` tensor is given, then no `size` argument is expected.
dtype: Optionally a string (e.g. "float32") or a PyTorch dtype
(e.g. torch.float32).
If `dtype` is not specified, the default choice of
`torch.empty(...)` is used, that is, `torch.float32`.
If an `out` tensor is specified, then `dtype` is expected
as None.
device: The device in which the new tensor will be stored.
If not specified, "cpu" will be used.
If an `out` tensor is specified, then `device` is expected
as None.
Returns:
The created or modified tensor after placing 1 values.
"""
if _scalar_requested(*size):
return _scalar_tensor(1, out=out, dtype=dtype, device=device)
else:
out = _out_tensor(*size, out=out, dtype=dtype, device=device)
out[:] = 1
return out
make_randint(*size, n, out=None, dtype=None, device=None, generator=None)
Make a new or existing tensor filled by random integers.
The integers are uniformly distributed within [0 ... n-1].
This function can be used with integer or float dtypes.
Parameters:
Name | Type | Description | Default |
---|---|---|---|
size | Union[int, torch.Size] | Size of the new tensor to be filled with uniformly distributed values. This can be given as multiple positional arguments, each such positional argument being an integer, or as a single positional argument of a tuple, the tuple containing multiple integers. Note that, if the user wishes to fill an existing tensor instead, then no positional argument is expected. | () |
n | Union[int, float, torch.Tensor] | Number of choice(s) for integer sampling. The lowest possible value will be 0, and the highest possible value will be n - 1. n can be a scalar, or a tensor. | required |
out | Optional[torch.Tensor] | Optionally, the tensor to be filled by the random integers. If an out tensor is given, then no size argument is expected. | None |
dtype | Union[str, torch.dtype, numpy.dtype, Type] | Optionally a string (e.g. "int64") or a PyTorch dtype (e.g. torch.int64). If dtype is not specified, torch.int64 will be used. | None |
device | Union[str, torch.device] | The device in which the new tensor will be stored. If not specified, "cpu" will be used. If an out tensor is specified, then device is expected as None. | None |
generator | Any | Pseudo-random number generator to be used when sampling the values. Can be a torch.Generator, or an object with a generator attribute (such as Problem). If left as None, the global generator of PyTorch will be used. | None |
Returns:
Type | Description |
---|---|
Tensor | The created or modified tensor after placing the uniformly distributed values. |
Source code in evotorch/tools/misc.py
def make_randint(
*size: Size,
n: Union[int, float, torch.Tensor],
out: Optional[torch.Tensor] = None,
dtype: Optional[DType] = None,
device: Optional[Device] = None,
generator: Any = None,
) -> torch.Tensor:
"""
Make a new or existing tensor filled by random integers.
The integers are uniformly distributed within `[0 ... n-1]`.
This function can be used with integer or float dtypes.
Args:
size: Size of the new tensor to be filled with uniformly distributed
values. This can be given as multiple positional arguments, each
such positional argument being an integer, or as a single
positional argument of a tuple, the tuple containing multiple
integers. Note that, if the user wishes to fill an existing
tensor instead, then no positional argument is expected.
n: Number of choice(s) for integer sampling.
The lowest possible value will be 0, and the highest possible
value will be n - 1.
`n` can be a scalar, or a tensor.
out: Optionally, the tensor to be filled by the random integers.
If an `out` tensor is given, then no `size` argument is
expected.
dtype: Optionally a string (e.g. "int64") or a PyTorch dtype
(e.g. torch.int64).
If `dtype` is not specified, torch.int64 will be used.
device: The device in which the new tensor will be stored.
If not specified, "cpu" will be used.
If an `out` tensor is specified, then `device` is expected
as None.
generator: Pseudo-random number generator to be used when sampling
the values. Can be a `torch.Generator`, or an object with
a `generator` attribute (such as `Problem`).
If left as None, the global generator of PyTorch will be used.
Returns:
The created or modified tensor after placing the uniformly
distributed values.
"""
scalar_requested = _scalar_requested(*size)
if scalar_requested:
size = (1,)
if (dtype is None) and (out is None):
dtype = torch.int64
out = _out_tensor(*size, out=out, dtype=dtype, device=device)
gen_kwargs = _generator_kwargs(generator)
out.random_(**gen_kwargs)
out %= n
if scalar_requested:
out = out[0]
return out
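A minimal usage sketch:
import torch
from evotorch.tools.misc import make_randint

dice = make_randint(10, n=6) + 1  # 10 integers from {1, ..., 6}; dtype is int64 by default
gen = torch.Generator().manual_seed(42)
reproducible = make_randint(3, 3, n=100, generator=gen)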
make_tensor(data, *, dtype=None, device=None, read_only=False)
Make a new tensor.
This function can be used to create PyTorch tensors, or ObjectArray instances with or without read-only behavior.
The following example creates a 2-dimensional PyTorch tensor:
my_tensor = make_tensor(
[[1, 2], [3, 4]],
dtype="float32", # alternatively, torch.float32
device="cpu",
)
The following example creates an ObjectArray from a list that contains arbitrary data:
my_obj_tensor = make_tensor(["a_string", (1, 2)], dtype=object)
Parameters:
Name | Type | Description | Default |
---|---|---|---|
data | Any | The data to be converted to a tensor. If one wishes to create a PyTorch tensor, this can be anything that can be stored by a PyTorch tensor. If one wishes to create an ObjectArray and therefore passes dtype=object, then the provided data is expected as an Iterable. | required |
dtype | Union[str, torch.dtype, numpy.dtype, Type] | Optionally a string (e.g. "float32"), or a PyTorch dtype (e.g. torch.float32), or object or "object" (as a string) or Any if one wishes to create an ObjectArray. If dtype is not specified, it will be assumed that the user wishes to create a PyTorch tensor (not an ObjectArray) and then dtype will be inferred from the provided data (according to the default behavior of PyTorch). | None |
device | Union[str, torch.device] | The device in which the tensor will be stored. If device is not specified, it will be understood from the given data (according to the default behavior of PyTorch). | None |
read_only | bool | Whether or not the created tensor will be read-only. By default, this is False. | False |
Returns:
Type | Description |
---|---|
Iterable | A PyTorch tensor or an ObjectArray. |
Source code in evotorch/tools/misc.py
def make_tensor(
data: Any,
*,
dtype: Optional[DType] = None,
device: Optional[Device] = None,
read_only: bool = False,
) -> Iterable:
"""
Make a new tensor.
This function can be used to create PyTorch tensors, or ObjectArray
instances with or without read-only behavior.
The following example creates a 2-dimensional PyTorch tensor:
my_tensor = make_tensor(
[[1, 2], [3, 4]],
dtype="float32", # alternatively, torch.float32
device="cpu",
)
The following example creates an ObjectArray from a list that contains
arbitrary data:
my_obj_tensor = make_tensor(["a_string", (1, 2)], dtype=object)
Args:
data: The data to be converted to a tensor.
If one wishes to create a PyTorch tensor, this can be anything
that can be stored by a PyTorch tensor.
If one wishes to create an `ObjectArray` and therefore passes
`dtype=object`, then the provided `data` is expected as an
`Iterable`.
dtype: Optionally a string (e.g. "float32"), or a PyTorch dtype
(e.g. torch.float32), or `object` or "object" (as a string)
or `Any` if one wishes to create an `ObjectArray`.
If `dtype` is not specified, it will be assumed that the user
wishes to create a PyTorch tensor (not an `ObjectArray`) and
then `dtype` will be inferred from the provided `data`
(according to the default behavior of PyTorch).
device: The device in which the tensor will be stored.
If `device` is not specified, it will be understood from the
given `data` (according to the default behavior of PyTorch).
read_only: Whether or not the created tensor will be read-only.
By default, this is False.
Returns:
A PyTorch tensor or an ObjectArray.
"""
from .objectarray import ObjectArray
from .readonlytensor import as_read_only_tensor
if (dtype is not None) and is_dtype_object(dtype):
data = list(data)
n = len(data)
result = ObjectArray(n)
result[:] = data
else:
kwargs = {}
if dtype is not None:
kwargs["dtype"] = to_torch_dtype(dtype)
if device is not None:
kwargs["device"] = device
result = torch.tensor(data, **kwargs)
if read_only:
result = as_read_only_tensor(result)
return result
make_uniform(*size, lb=None, ub=None, out=None, dtype=None, device=None, generator=None)
Make a new or existing tensor filled by uniformly distributed values. Both lower and upper bounds are inclusive. This function can work with both float and int dtypes.
Parameters:
Name | Type | Description | Default |
---|---|---|---|
size | Union[int, torch.Size] | Size of the new tensor to be filled with uniformly distributed values. This can be given as multiple positional arguments, each such positional argument being an integer, or as a single positional argument of a tuple, the tuple containing multiple integers. Note that, if the user wishes to fill an existing tensor instead, then no positional argument is expected. | () |
lb | Union[float, Iterable[float], torch.Tensor] | Lower bound for the uniformly distributed values. Can be a scalar, or a tensor. If not specified, the lower bound will be taken as 0. Note that, if one specifies lb, then ub is also expected to be explicitly specified. | None |
ub | Union[float, Iterable[float], torch.Tensor] | Upper bound for the uniformly distributed values. Can be a scalar, or a tensor. If not specified, the upper bound will be taken as 1. Note that, if one specifies ub, then lb is also expected to be explicitly specified. | None |
out | Optional[torch.Tensor] | Optionally, the tensor to be filled by uniformly distributed values. If an out tensor is given, then no size argument is expected. | None |
dtype | Union[str, torch.dtype, numpy.dtype, Type] | Optionally a string (e.g. "float32") or a PyTorch dtype (e.g. torch.float32). If dtype is not specified, the default choice of torch.empty(...) is used, that is, torch.float32. If an out tensor is specified, then dtype is expected as None. | None |
device | Union[str, torch.device] | The device in which the new tensor will be stored. If not specified, "cpu" will be used. If an out tensor is specified, then device is expected as None. | None |
generator | Any | Pseudo-random number generator to be used when sampling the values. Can be a torch.Generator, or an object with a generator attribute (such as Problem). If left as None, the global generator of PyTorch will be used. | None |
Returns:
Type | Description |
---|---|
Tensor | The created or modified tensor after placing the uniformly distributed values. |
Source code in evotorch/tools/misc.py
def make_uniform(
    *size: Size,
    lb: Optional[RealOrVector] = None,
    ub: Optional[RealOrVector] = None,
    out: Optional[torch.Tensor] = None,
    dtype: Optional[DType] = None,
    device: Optional[Device] = None,
    generator: Any = None,
) -> torch.Tensor:
    """
    Make a new or existing tensor filled by uniformly distributed values.
    Both lower and upper bounds are inclusive.
    This function can work with both float and int dtypes.
    Args:
        size: Size of the new tensor to be filled with uniformly distributed
            values. This can be given as multiple positional arguments, each
            such positional argument being an integer, or as a single
            positional argument of a tuple, the tuple containing multiple
            integers. Note that, if the user wishes to fill an existing
            tensor instead, then no positional argument is expected.
        lb: Lower bound for the uniformly distributed values.
            Can be a scalar, or a tensor.
            If not specified, the lower bound will be taken as 0.
            Note that, if one specifies `lb`, then `ub` is also expected to
            be explicitly specified.
        ub: Upper bound for the uniformly distributed values.
            Can be a scalar, or a tensor.
            If not specified, the upper bound will be taken as 1.
            Note that, if one specifies `ub`, then `lb` is also expected to
            be explicitly specified.
        out: Optionally, the tensor to be filled by uniformly distributed
            values. If an `out` tensor is given, then no `size` argument is
            expected.
        dtype: Optionally a string (e.g. "float32") or a PyTorch dtype
            (e.g. torch.float32).
            If `dtype` is not specified, the default choice of
            `torch.empty(...)` is used, that is, `torch.float32`.
            If an `out` tensor is specified, then `dtype` is expected
            as None.
        device: The device in which the new tensor will be stored.
            If not specified, "cpu" will be used.
            If an `out` tensor is specified, then `device` is expected
            as None.
        generator: Pseudo-random number generator to be used when sampling
            the values. Can be a `torch.Generator`, or an object with
            a `generator` attribute (such as `Problem`).
            If left as None, the global generator of PyTorch will be used.
    Returns:
        The created or modified tensor after placing the uniformly
        distributed values.
    """
    scalar_requested = _scalar_requested(*size)
    if scalar_requested:
        size = (1,)

    def _invalid_bound_args():
        raise ValueError(
            f"Expected both `lb` and `ub` as None, or both `lb` and `ub` as not None."
            f" It appears that one of them is None, while the other is not."
            f" lb: {repr(lb)}."
            f" ub: {repr(ub)}."
        )

    out = _out_tensor(*size, out=out, dtype=dtype, device=device)
    gen_kwargs = _generator_kwargs(generator)

    def _cast_bounds():
        nonlocal lb, ub
        lb = torch.as_tensor(lb, dtype=out.dtype, device=out.device)
        ub = torch.as_tensor(ub, dtype=out.dtype, device=out.device)

    if out.dtype in (torch.uint8, torch.int8, torch.int16, torch.int32, torch.int64):
        out.random_(**gen_kwargs)
        if (lb is None) and (ub is None):
            out %= 2
        elif (lb is not None) and (ub is not None):
            _cast_bounds()
            diff = (ub - lb) + 1
            out -= lb
            out %= diff
            out += lb
        else:
            _invalid_bound_args()
    else:
        out.uniform_(**gen_kwargs)
        if (lb is None) and (ub is None):
            pass  # nothing to do
        elif (lb is not None) and (ub is not None):
            _cast_bounds()
            diff = ub - lb
            out *= diff
            out += lb
        else:
            _invalid_bound_args()

    if scalar_requested:
        out = out[0]

    return out
make_zeros(*size, out=None, dtype=None, device=None)
¶
Make a new tensor filled with 0, or fill an existing tensor with 0.
The following example creates a float32 tensor filled with 0 values, of shape (3, 5):
zero_values = make_zeros(3, 5, dtype="float32")
The following example fills an existing tensor with 0s:
make_zeros(out=existing_tensor)
Parameters:

Name | Type | Description | Default
---|---|---|---
size | Union[int, torch.Size] | Size of the new tensor to be filled with 0. This can be given as multiple positional arguments, each such positional argument being an integer, or as a single positional argument of a tuple, the tuple containing multiple integers. Note that, if the user wishes to fill an existing tensor with 0 values, then no positional argument is expected. | ()
out | Optional[torch.Tensor] | Optionally, the tensor to be filled by 0 values. If an out tensor is given, then no size argument is expected. | None
dtype | Union[str, torch.dtype, numpy.dtype, Type] | Optionally a string (e.g. "float32") or a PyTorch dtype (e.g. torch.float32). If dtype is not specified, the default choice of torch.empty(...) is used, that is, torch.float32. If an out tensor is specified, then dtype is expected as None. | None
device | Union[str, torch.device] | The device in which the new tensor will be stored. If not specified, "cpu" will be used. If an out tensor is specified, then device is expected as None. | None

Returns:

Type | Description
---|---
Tensor | The created or modified tensor after placing 0 values.
Source code in evotorch/tools/misc.py
def make_zeros(
    *size: Size,
    out: Optional[torch.Tensor] = None,
    dtype: Optional[DType] = None,
    device: Optional[Device] = None,
) -> torch.Tensor:
    """
    Make a new tensor filled with 0, or fill an existing tensor with 0.

    The following example creates a float32 tensor filled with 0 values,
    of shape (3, 5):

        zero_values = make_zeros(3, 5, dtype="float32")

    The following example fills an existing tensor with 0s:

        make_zeros(out=existing_tensor)

    Args:
        size: Size of the new tensor to be filled with 0.
            This can be given as multiple positional arguments, each such
            positional argument being an integer, or as a single positional
            argument of a tuple, the tuple containing multiple integers.
            Note that, if the user wishes to fill an existing tensor with
            0 values, then no positional argument is expected.
        out: Optionally, the tensor to be filled by 0 values.
            If an `out` tensor is given, then no `size` argument is expected.
        dtype: Optionally a string (e.g. "float32") or a PyTorch dtype
            (e.g. torch.float32).
            If `dtype` is not specified, the default choice of
            `torch.empty(...)` is used, that is, `torch.float32`.
            If an `out` tensor is specified, then `dtype` is expected
            as None.
        device: The device in which the new tensor will be stored.
            If not specified, "cpu" will be used.
            If an `out` tensor is specified, then `device` is expected
            as None.
    Returns:
        The created or modified tensor after placing 0 values.
    """
    if _scalar_requested(*size):
        return _scalar_tensor(0, out=out, dtype=dtype, device=device)
    else:
        out = _out_tensor(*size, out=out, dtype=dtype, device=device)
        out.zero_()
        return out
modify_tensor(original, target, lb=None, ub=None, max_change=None, in_place=False)
¶
Return the modified version of the original tensor, with bounds checking.
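As an illustrative sketch of how the bounds and the max_change restriction interact (assuming the helper is imported from evotorch.tools.misc):

import torch
from evotorch.tools.misc import modify_tensor

original = torch.tensor([1.0, -2.0, 4.0])
target = torch.tensor([10.0, -10.0, 4.5])

# Allow each element to move by at most 50% of its own magnitude,
# and additionally clip the result into [-5.0, 5.0]:
result = modify_tensor(original, target, lb=-5.0, ub=5.0, max_change=0.5)
# result is tensor([1.5, -3.0, 4.5]); `original` itself is left untouched
# because in_place is False by default.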
Parameters:

Name | Type | Description | Default
---|---|---|---
original | Tensor | The original tensor. | required
target | Tensor | The target tensor which contains the values to replace the old ones in the original tensor. | required
lb | Union[float, torch.Tensor] | The lower bound(s), as a scalar or as a tensor. Values below these bounds are clipped in the resulting tensor. None means -inf. | None
ub | Union[float, torch.Tensor] | The upper bound(s), as a scalar or as a tensor. Values above these bounds are clipped in the resulting tensor. None means +inf. | None
max_change | Union[float, torch.Tensor] | The ratio of allowed change. In more detail, when given as a real number r, modifications are allowed only within [original-(r*abs(original)) ... original+(r*abs(original))]. Modifications beyond this interval are clipped. This argument can also be left as None if no such limitation is needed. | None
in_place | bool | Provide this as True if you wish the modification to be done within the original tensor. The default value of this argument is False, which means the original tensor is not changed, and its modified version is returned as an independent copy. | False

Returns:

Type | Description
---|---
Tensor | The modified tensor.
Source code in evotorch/tools/misc.py
@torch.no_grad()
def modify_tensor(
    original: torch.Tensor,
    target: torch.Tensor,
    lb: Optional[Union[float, torch.Tensor]] = None,
    ub: Optional[Union[float, torch.Tensor]] = None,
    max_change: Optional[Union[float, torch.Tensor]] = None,
    in_place: bool = False,
) -> torch.Tensor:
    """Return the modified version of the original tensor, with bounds checking.
    Args:
        original: The original tensor.
        target: The target tensor which contains the values to replace the
            old ones in the original tensor.
        lb: The lower bound(s), as a scalar or as a tensor.
            Values below these bounds are clipped in the resulting tensor.
            None means -inf.
        ub: The upper bound(s), as a scalar or as a tensor.
            Values above these bounds are clipped in the resulting tensor.
            None means +inf.
        max_change: The ratio of allowed change.
            In more detail, when given as a real number r,
            modifications are allowed only within
            ``[original-(r*abs(original)) ... original+(r*abs(original))]``.
            Modifications beyond this interval are clipped.
            This argument can also be left as None if no such limitation
            is needed.
        in_place: Provide this as True if you wish the modification to be
            done within the original tensor. The default value of this
            argument is False, which means the original tensor is not
            changed, and its modified version is returned as an independent
            copy.
    Returns:
        The modified tensor.
    """
    if (lb is None) and (ub is None) and (max_change is None):
        # If there is no restriction regarding how the tensor
        # should be modified (no lb, no ub, no max_change),
        # then we simply use the target values
        # themselves for modifying the tensor.
        if in_place:
            original[:] = target
            return original
        else:
            return target
    else:
        # If there are some restrictions regarding how the tensor
        # should be modified, then we turn to the following
        # operations.

        def convert_to_tensor(x, tensorname: str):
            if isinstance(x, torch.Tensor):
                converted = x
            else:
                converted = torch.as_tensor(x, dtype=original.dtype, device=original.device)
            if converted.ndim == 0 or converted.shape == original.shape:
                return converted
            else:
                raise IndexError(
                    f"Argument {tensorname}: shape mismatch."
                    f" Shape of the original tensor: {original.shape}."
                    f" Shape of {tensorname}: {converted.shape}."
                )

        if lb is None:
            # If lb is None, then it should be taken as -inf
            lb = convert_to_tensor(float("-inf"), "lb")
        else:
            lb = convert_to_tensor(lb, "lb")

        if ub is None:
            # If ub is None, then it should be taken as +inf
            ub = convert_to_tensor(float("inf"), "ub")
        else:
            ub = convert_to_tensor(ub, "ub")

        if max_change is not None:
            # If max_change is provided as something other than None,
            # then we update the lb and ub so that they are tight
            # enough to satisfy the max_change restriction.
            max_change = convert_to_tensor(max_change, "max_change")
            allowed_amounts = torch.abs(original) * max_change
            allowed_lb = original - allowed_amounts
            allowed_ub = original + allowed_amounts
            lb = torch.max(lb, allowed_lb)
            ub = torch.min(ub, allowed_ub)

        ## If in_place is given as True, the clipping (that we are about
        ## to perform), should be in-place.
        # more_config = {}
        # if in_place:
        #     more_config['out'] = original
        #
        ## Return the clipped version of the target values
        # return torch.clamp(target, lb, ub, **more_config)

        result = torch.max(target, lb)
        result = torch.min(result, ub)

        if in_place:
            original[:] = result
            return original
        else:
            return result
numpy_copy(x, dtype)
¶
Return a numpy copy of the given iterable.
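For illustration (assuming the helper is imported from evotorch.tools.misc), the result is an independent copy:

import torch
from evotorch.tools.misc import numpy_copy

t = torch.tensor([1.0, 2.0, 3.0])
a = numpy_copy(t, dtype="float32")  # a new numpy.ndarray with dtype float32
a[0] = 42.0                         # modifying the copy does not affect `t`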
Parameters:

Name | Type | Description | Default
---|---|---|---
x | Iterable | Any Iterable whose numpy copy will be returned. | required
dtype | Union[str, torch.dtype, numpy.dtype, Type] | The desired dtype. Can be given as a numpy dtype, as a torch dtype, or a native dtype (e.g. int, float), or as a string (e.g. "float32"). | required

Returns:

Type | Description
---|---
ndarray | The numpy copy of the original iterable object.
Source code in evotorch/tools/misc.py
def numpy_copy(x: Iterable, dtype: DType) -> np.ndarray:
    """
    Return a numpy copy of the given iterable.
    Args:
        x: Any Iterable whose numpy copy will be returned.
        dtype: The desired dtype. Can be given as a numpy dtype,
            as a torch dtype, or a native dtype (e.g. int, float),
            or as a string (e.g. "float32").
    Returns:
        The numpy copy of the original iterable object.
    """
    dtype = to_numpy_dtype(dtype)
    if isinstance(x, torch.Tensor):
        x = x.cpu()
    return np.array(x, dtype=dtype)
split_workload(workload, num_actors)
¶
Split a workload among actors.
By "workload" what is meant is the total amount of a work, this amount being expressed by an integer. For example, if the "work" is the evaluation of a population, the "workload" would usually be the population size.
Parameters:

Name | Type | Description | Default
---|---|---|---
workload | int | Total amount of work, as an integer. | required
num_actors | int | Number of actors (i.e. remote workers) among which the workload will be distributed. | required

Returns:

Type | Description
---|---
list | A list of integers. The i-th item of the returned list expresses the suggested workload for the i-th actor.
Source code in evotorch/tools/misc.py
def split_workload(workload: int, num_actors: int) -> list:
    """
    Split a workload among actors.
    By "workload" what is meant is the total amount of a work,
    this amount being expressed by an integer.
    For example, if the "work" is the evaluation of a population,
    the "workload" would usually be the population size.
    Args:
        workload: Total amount of work, as an integer.
        num_actors: Number of actors (i.e. remote workers) among
            which the workload will be distributed.
    Returns:
        A list of integers. The i-th item of the returned list
        expresses the suggested workload for the i-th actor.
    """
    base_workload = workload // num_actors
    extra_workload = workload % num_actors
    result = [base_workload] * num_actors
    for i in range(extra_workload):
        result[i] += 1
    return result
stdev_from_radius(radius, solution_length)
¶
Get elementwise standard deviation from a given radius.
Sometimes, for a distribution-based search algorithm, the user might
choose to configure the initial coverage area of the search distribution
not via standard deviation, but via a radius value, as was done in the
study of Toklu et al. (2020).
This function takes the desired radius value and the solution length of
the problem at hand, and returns the elementwise standard deviation value.
Let us name this returned standard deviation value as s.
When a new Gaussian distribution is constructed such that its initial
standard deviation is [s, s, s, ...] (the length of this vector being
equal to the solution length), this constructed distribution's radius
corresponds with the desired radius.
Here, the "radius" of a Gaussian distribution is defined as the norm
of the standard deviation vector. In the case of a standard normal
distribution, this radius formulation serves as a simplified approximation
to E[||Normal(0, I)||]
(for which a closer approximation is used in
the study of Hansen & Ostermeier (2001)).
Reference:
Toklu, N.E., Liskowski, P., Srivastava, R.K. (2020).
ClipUp: A Simple and Powerful Optimizer
for Distribution-based Policy Evolution.
Parallel Problem Solving from Nature (PPSN 2020).
Nikolaus Hansen, Andreas Ostermeier (2001).
Completely Derandomized Self-Adaptation in Evolution Strategies.
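As a small worked example of the relation s = sqrt(radius**2 / solution_length) (assuming the helper is imported from evotorch.tools.misc):

from evotorch.tools.misc import stdev_from_radius

s = stdev_from_radius(4.5, solution_length=100)
# s == 0.45, since sqrt(4.5**2 / 100) == 0.45, and the vector [0.45] * 100
# has Euclidean norm 4.5, i.e. the requested radius.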
Parameters:

Name | Type | Description | Default
---|---|---|---
radius | float | The radius whose elementwise standard deviation counterpart will be returned. | required
solution_length | int | Length of a solution for the problem at hand. | required

Returns:

Type | Description
---|---
float | An elementwise standard deviation value s, such that a Gaussian distribution constructed with the standard deviation [s, s, s, ...] has the desired radius.
Source code in evotorch/tools/misc.py
def stdev_from_radius(radius: float, solution_length: int) -> float:
    """
    Get elementwise standard deviation from a given radius.

    Sometimes, for a distribution-based search algorithm, the user might
    choose to configure the initial coverage area of the search distribution
    not via standard deviation, but via a radius value, as was done in the
    study of Toklu et al. (2020).
    This function takes the desired radius value and the solution length of
    the problem at hand, and returns the elementwise standard deviation value.
    Let us name this returned standard deviation value as `s`.
    When a new Gaussian distribution is constructed such that its initial
    standard deviation is `[s, s, s, ...]` (the length of this vector being
    equal to the solution length), this constructed distribution's radius
    corresponds with the desired radius.

    Here, the "radius" of a Gaussian distribution is defined as the norm
    of the standard deviation vector. In the case of a standard normal
    distribution, this radius formulation serves as a simplified approximation
    to `E[||Normal(0, I)||]` (for which a closer approximation is used in
    the study of Hansen & Ostermeier (2001)).

    Reference:

        Toklu, N.E., Liskowski, P., Srivastava, R.K. (2020).
        ClipUp: A Simple and Powerful Optimizer
        for Distribution-based Policy Evolution.
        Parallel Problem Solving from Nature (PPSN 2020).

        Nikolaus Hansen, Andreas Ostermeier (2001).
        Completely Derandomized Self-Adaptation in Evolution Strategies.

    Args:
        radius: The radius whose elementwise standard deviation counterpart
            will be returned.
        solution_length: Length of a solution for the problem at hand.
    Returns:
        An elementwise standard deviation value `s`, such that a Gaussian
        distribution constructed with the standard deviation `[s, s, s, ...]`
        has the desired radius.
    """
    radius = float(radius)
    solution_length = int(solution_length)
    return math.sqrt((radius**2) / solution_length)
to_numpy_dtype(dtype)
¶
Convert the given string or the given PyTorch dtype to a numpy dtype. If the argument is already a numpy dtype, then the argument is returned as it is.
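For illustration (assuming the helper is imported from evotorch.tools.misc):

import torch
from evotorch.tools.misc import to_numpy_dtype

to_numpy_dtype("float32")      # -> dtype('float32')
to_numpy_dtype(torch.float32)  # -> dtype('float32')
to_numpy_dtype(object)         # -> dtype('O'), i.e. the numpy object dtype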
Returns:

Type | Description
---|---
np.dtype | The dtype, converted to a numpy dtype.
Source code in evotorch/tools/misc.py
def to_numpy_dtype(dtype: DType) -> np.dtype:
    """
    Convert the given string or the given PyTorch dtype to a numpy dtype.
    If the argument is already a numpy dtype, then the argument is returned
    as it is.
    Returns:
        The dtype, converted to a numpy dtype.
    """
    if isinstance(dtype, torch.dtype):
        return torch.tensor([], dtype=dtype).numpy().dtype
    elif is_dtype_object(dtype):
        return np.dtype(object)
    elif isinstance(dtype, np.dtype):
        return dtype
    else:
        return np.dtype(dtype)
to_stdev_init(*, solution_length, stdev_init=None, radius_init=None)
¶
Ask for both standard deviation and radius, return the standard deviation.
It is very common among the distribution-based search algorithms to ask for both standard deviation and for radius for initializing the coverage area of the search distribution. During their initialization phases, these algorithms must check which one the user provided (radius or standard deviation), and return the result as the standard deviation so that a Gaussian distribution can easily be constructed.
This function serves as a helper function for such search algorithms by performing the following actions (see the usage sketch after this list):
- If the user provided a standard deviation and not a radius, then this provided standard deviation is simply returned.
- If the user provided a radius and not a standard deviation, then this provided radius is converted to its standard deviation counterpart, and then returned.
- If both standard deviation and radius are missing, or they are both given at the same time, then an error is raised.
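For illustration, a minimal sketch of these three cases (assuming the helper is imported from evotorch.tools.misc):

from evotorch.tools.misc import to_stdev_init

# Radius given, standard deviation omitted: the radius is converted.
to_stdev_init(solution_length=100, radius_init=4.5)  # -> 0.45

# Standard deviation given, radius omitted: it is returned unchanged.
to_stdev_init(solution_length=100, stdev_init=0.1)   # -> 0.1

# Giving both arguments, or neither of them, raises a ValueError.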
Parameters:

Name | Type | Description | Default
---|---|---|---
solution_length | int | Length of a solution for the problem at hand. | required
stdev_init | Union[float, Iterable[float], torch.Tensor] | Standard deviation. If one wishes to provide a radius instead, then stdev_init is expected as None. | None
radius_init | Union[float, Iterable[float], torch.Tensor] | Radius. If one wishes to provide a standard deviation instead, then radius_init is expected as None. | None

Returns:

Type | Description
---|---
Union[float, Iterable[float], torch.Tensor] | The standard deviation for the search distribution to be constructed.
Source code in evotorch/tools/misc.py
def to_stdev_init(
    *,
    solution_length: int,
    stdev_init: Optional[RealOrVector] = None,
    radius_init: Optional[RealOrVector] = None,
) -> RealOrVector:
    """
    Ask for both standard deviation and radius, return the standard deviation.

    It is very common among the distribution-based search algorithms to ask
    for both standard deviation and for radius for initializing the coverage
    area of the search distribution. During their initialization phases,
    these algorithms must check which one the user provided (radius or
    standard deviation), and return the result as the standard deviation
    so that a Gaussian distribution can easily be constructed.

    This function serves as a helper function for such search algorithms
    by performing these actions:

    - If the user provided a standard deviation and not a radius, then this
      provided standard deviation is simply returned.
    - If the user provided a radius and not a standard deviation, then this
      provided radius is converted to its standard deviation counterpart,
      and then returned.
    - If both standard deviation and radius are missing, or they are both
      given at the same time, then an error is raised.

    Args:
        solution_length: Length of a solution for the problem at hand.
        stdev_init: Standard deviation. If one wishes to provide a radius
            instead, then `stdev_init` is expected as None.
        radius_init: Radius. If one wishes to provide a standard deviation
            instead, then `radius_init` is expected as None.
    Returns:
        The standard deviation for the search distribution to be constructed.
    """
    if (stdev_init is not None) and (radius_init is None):
        return stdev_init
    elif (stdev_init is None) and (radius_init is not None):
        return stdev_from_radius(radius_init, solution_length)
    elif (stdev_init is None) and (radius_init is None):
        raise ValueError(
            "Received both `stdev_init` and `radius_init` as None."
            " Please provide a value either for `stdev_init` or for `radius_init`."
        )
    else:
        raise ValueError(
            "Found both `stdev_init` and `radius_init` with values other than None."
            " Please provide a value either for `stdev_init` or for `radius_init`, but not for both."
        )
to_torch_dtype(dtype)
¶
Convert the given string or the given numpy dtype to a PyTorch dtype. If the argument is already a PyTorch dtype, then the argument is returned as it is.
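For illustration (assuming the helper is imported from evotorch.tools.misc):

import numpy as np
from evotorch.tools.misc import to_torch_dtype

to_torch_dtype("float32")          # -> torch.float32
to_torch_dtype(np.dtype("int64"))  # -> torch.int64
# to_torch_dtype(object)           # would raise TypeError (not a numeric dtype)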
Returns:

Type | Description
---|---
torch.dtype | The dtype, converted to a PyTorch dtype.
Source code in evotorch/tools/misc.py
def to_torch_dtype(dtype: DType) -> torch.dtype:
    """
    Convert the given string or the given numpy dtype to a PyTorch dtype.
    If the argument is already a PyTorch dtype, then the argument is returned
    as it is.
    Returns:
        The dtype, converted to a PyTorch dtype.
    """
    if isinstance(dtype, str) and hasattr(torch, dtype):
        attrib_within_torch = getattr(torch, dtype)
    else:
        attrib_within_torch = None

    if isinstance(attrib_within_torch, torch.dtype):
        return attrib_within_torch
    elif isinstance(dtype, torch.dtype):
        return dtype
    elif dtype is Any or dtype is object:
        raise TypeError(f"Cannot make a numeric tensor with dtype {repr(dtype)}")
    else:
        return torch.from_numpy(np.array([], dtype=dtype)).dtype