Skip to content

evotorch.logging

This module contains logging utilities.

Logger

Base class for all logging classes.

Source code in evotorch/logging.py
class Logger:
    """Base class for all logging classes."""

    def __init__(self, searcher: SearchAlgorithm, *, interval: int = 1, after_first_step: bool = False):
        """`__init__(...)`: Initialize the Logger.

        Args:
            searcher: The evolutionary algorithm instance whose progress
                is to be logged.
            interval: An integer n; logging happens once every n iterations.
            after_first_step: A boolean, meaningful only when interval is
                greater than 1. With interval=10 and after_first_step=False
                (the default), logging happens at steps 10, 20, 30, and so
                on; with after_first_step=True, it happens at steps 1, 11,
                21, 31, and so on.
        """
        # Register this logger so the searcher invokes it after each step.
        searcher.log_hook.append(self)
        self._interval = int(interval)
        self._after_first_step = bool(after_first_step)
        self._steps_count = 0

    def __call__(self, status: dict):
        # Choose which counter value drives the interval test: the
        # pre-increment value when logging should begin right after the
        # first step, the post-increment value otherwise.
        if self._after_first_step:
            effective_count = self._steps_count
            self._steps_count += 1
        else:
            self._steps_count += 1
            effective_count = self._steps_count

        if effective_count % self._interval == 0:
            self._log(self._filter(status))

    def _filter(self, status: dict) -> dict:
        # Hook for subclasses: return the subset of the status to be logged.
        return status

    def _log(self, status: dict):
        # Subclasses must override this to actually record the status.
        raise NotImplementedError

__init__(self, searcher, *, interval=1, after_first_step=False) special

__init__(...): Initialize the Logger.

Parameters:

Name Type Description Default
searcher SearchAlgorithm

The evolutionary algorithm instance whose progress is to be logged.

required
interval int

Expected as an integer n. Logging is to be done at every n iterations.

1
after_first_step bool

Expected as a boolean. Meaningful only if interval is set as an integer greater than 1. Let us suppose that interval is set as 10. If after_first_step is False (which is the default), then the logging will be done at steps 10, 20, 30, and so on. On the other hand, if after_first_step is True, then the logging will be done at steps 1, 11, 21, 31, and so on.

False
Source code in evotorch/logging.py
def __init__(self, searcher: SearchAlgorithm, *, interval: int = 1, after_first_step: bool = False):
    """`__init__(...)`: Initialize the Logger.

    Args:
        searcher: The evolutionary algorithm instance whose progress
            is to be logged.
        interval: An integer n; logging happens once every n iterations.
        after_first_step: A boolean, meaningful only when interval is
            greater than 1. With interval=10 and after_first_step=False
            (the default), logging happens at steps 10, 20, 30, and so
            on; with after_first_step=True, it happens at steps 1, 11,
            21, 31, and so on.
    """
    self._interval = int(interval)
    self._after_first_step = bool(after_first_step)
    self._steps_count = 0
    # Register this logger so the searcher invokes it after each step.
    searcher.log_hook.append(self)

MlflowLogger (ScalarLogger)

A logger which stores the status via Mlflow.

Source code in evotorch/logging.py
class MlflowLogger(ScalarLogger):
    """A logger which stores the status via Mlflow."""

    def __init__(
        self,
        searcher: SearchAlgorithm,
        client: Optional[mlflow.tracking.MlflowClient] = None,
        run: Union[mlflow.entities.Run, Optional[MlflowID]] = None,
        *,
        interval: int = 1,
        after_first_step: bool = False,
    ):
        """`__init__(...)`: Initialize the MlflowLogger.

        Args:
            searcher: The evolutionary algorithm instance whose progress
                is to be logged.
            client: The MlflowClient object whose log_metric() method
                will be used for logging. May be None, in which case
                mlflow.log_metrics() is used instead. When a client is
                given, the `run` argument is required as well.
            run: Expected only if a client is provided. Either an mlflow
                Run object (an instance of mlflow.entities.Run) or the
                ID of the mlflow run.
            interval: An integer n; logging happens once every n iterations.
            after_first_step: Meaningful only when interval is greater
                than 1. With interval=10 and after_first_step=False (the
                default), logging happens at steps 10, 20, 30, and so on;
                with after_first_step=True it happens at steps 1, 11, 21,
                31, and so on.
        """
        super().__init__(searcher, interval=interval, after_first_step=after_first_step)

        self._client = client
        self._run_id: Optional[MlflowID] = None

        # `client` and `run` must be given together or not at all.
        if self._client is None:
            if run is not None:
                raise ValueError("Received `run`, but `client` is missing")
        elif run is None:
            raise ValueError("Received `client`, but `run` is missing")
        else:
            self._run_id = run.info.run_id if isinstance(run, mlflow.entities.Run) else run

    def _log(self, status: dict):
        step = self._steps_count
        if self._client is not None:
            for key, value in status.items():
                self._client.log_metric(self._run_id, key, value, step=step)
        else:
            mlflow.log_metrics(status, step=step)

__init__(self, searcher, client=None, run=None, *, interval=1, after_first_step=False) special

__init__(...): Initialize the MlflowLogger.

Parameters:

Name Type Description Default
searcher SearchAlgorithm

The evolutionary algorithm instance whose progress is to be logged.

required
client Optional[mlflow.tracking.client.MlflowClient]

The MlflowClient object whose log_metric() method will be used for logging. This can be passed as None, in which case mlflow.log_metrics() will be used instead. Please note that, if a client is provided, the run argument is required as well.

None
run Union[mlflow.entities.run.Run, str, bytes, int]

Expected only if a client is provided. This is the mlflow Run object (an instance of mlflow.entities.Run), or the ID of the mlflow run.

None
interval int

Expected as an integer n. Logging is to be done at every n iterations.

1
after_first_step bool

Expected as a boolean. Meaningful only if interval is set as an integer greater than 1. Let us suppose that interval is set as 10. If after_first_step is False (which is the default), then the logging will be done at steps 10, 20, 30, and so on. On the other hand, if after_first_step is True, then the logging will be done at steps 1, 11, 21, 31, and so on.

False
Source code in evotorch/logging.py
def __init__(
    self,
    searcher: SearchAlgorithm,
    client: Optional[mlflow.tracking.MlflowClient] = None,
    run: Union[mlflow.entities.Run, Optional[MlflowID]] = None,
    *,
    interval: int = 1,
    after_first_step: bool = False,
):
    """`__init__(...)`: Initialize the MlflowLogger.

    Args:
        searcher: The evolutionary algorithm instance whose progress
            is to be logged.
        client: The MlflowClient object whose log_metric() method
            will be used for logging. May be None, in which case
            mlflow.log_metrics() is used instead. When a client is
            given, the `run` argument is required as well.
        run: Expected only if a client is provided. Either an mlflow
            Run object (an instance of mlflow.entities.Run) or the
            ID of the mlflow run.
        interval: An integer n; logging happens once every n iterations.
        after_first_step: Meaningful only when interval is greater
            than 1. With interval=10 and after_first_step=False (the
            default), logging happens at steps 10, 20, 30, and so on;
            with after_first_step=True it happens at steps 1, 11, 21,
            31, and so on.
    """
    super().__init__(searcher, interval=interval, after_first_step=after_first_step)

    self._client = client
    self._run_id: Optional[MlflowID] = None

    # `client` and `run` must be given together or not at all.
    if self._client is None:
        if run is not None:
            raise ValueError("Received `run`, but `client` is missing")
    elif run is None:
        raise ValueError("Received `client`, but `run` is missing")
    else:
        self._run_id = run.info.run_id if isinstance(run, mlflow.entities.Run) else run

NeptuneLogger (ScalarLogger)

A logger which stores the status via neptune.

Source code in evotorch/logging.py
class NeptuneLogger(ScalarLogger):
    """A logger which stores the status via neptune."""

    def __init__(
        self,
        searcher: SearchAlgorithm,
        run,
        *,
        interval: int = 1,
        after_first_step: bool = False,
        group: Optional[str] = None,
    ):
        """`__init__(...)`: Initialize the NeptuneLogger.

        Args:
            searcher: The evolutionary algorithm instance whose progress
                is to be logged.
            run: A `neptune.new.run.Run` instance using which the status
                will be logged.
            interval: An integer n; logging happens once every n iterations.
            after_first_step: Meaningful only when interval is greater
                than 1. With interval=10 and after_first_step=False (the
                default), logging happens at steps 10, 20, 30, and so on;
                with after_first_step=True it happens at steps 1, 11, 21,
                31, and so on.
            group: The group under which the metrics will be stored.
                For example, with status keys "score" and "elapsed" and
                `group` set as "training", the metrics reach neptune
                under the keys "training/score" and "training/elapsed".
                When `group` is None, the key names are used unchanged.
        """
        super().__init__(searcher, interval=interval, after_first_step=after_first_step)
        self._group = group
        self._run = run

    def _log(self, status: dict):
        # Prepend the group name (if any) to every status key.
        prefix = "" if self._group is None else self._group + "/"
        for key, value in status.items():
            self._run[prefix + key].log(value)

__init__(self, searcher, run, *, interval=1, after_first_step=False, group=None) special

__init__(...): Initialize the NeptuneLogger.

Parameters:

Name Type Description Default
searcher SearchAlgorithm

The evolutionary algorithm instance whose progress is to be logged.

required
run

A neptune.new.run.Run instance using which the status will be logged.

required
interval int

Expected as an integer n. Logging is to be done at every n iterations.

1
after_first_step bool

Expected as a boolean. Meaningful only if interval is set as an integer greater than 1. Let us suppose that interval is set as 10. If after_first_step is False (which is the default), then the logging will be done at steps 10, 20, 30, and so on. On the other hand, if after_first_step is True, then the logging will be done at steps 1, 11, 21, 31, and so on.

False
group Optional[str]

The group under which the metrics will be stored. For example, if the status keys to be logged are "score" and "elapsed", and group is set as "training", then the metrics will be sent to neptune with the keys "training/score" and "training/elapsed". group can also be left as None, in which case the status will be sent to neptune with the key names unchanged.

None
Source code in evotorch/logging.py
def __init__(
    self,
    searcher: SearchAlgorithm,
    run,
    *,
    interval: int = 1,
    after_first_step: bool = False,
    group: Optional[str] = None,
):
    """`__init__(...)`: Initialize the NeptuneLogger.

    Args:
        searcher: The evolutionary algorithm instance whose progress
            is to be logged.
        run: A `neptune.new.run.Run` instance using which the status
            will be logged.
        interval: An integer n; logging happens once every n iterations.
        after_first_step: Meaningful only when interval is greater
            than 1. With interval=10 and after_first_step=False (the
            default), logging happens at steps 10, 20, 30, and so on;
            with after_first_step=True it happens at steps 1, 11, 21,
            31, and so on.
        group: The group under which the metrics will be stored.
            For example, with status keys "score" and "elapsed" and
            `group` set as "training", the metrics reach neptune
            under the keys "training/score" and "training/elapsed".
            When `group` is None, the key names are used unchanged.
    """
    super().__init__(searcher, interval=interval, after_first_step=after_first_step)
    self._group = group
    self._run = run

PandasLogger (ScalarLogger)

A logger which collects status information and generates a pandas.DataFrame at the end.

Source code in evotorch/logging.py
class PandasLogger(ScalarLogger):
    """A logger which collects status information and
    generates a pandas.DataFrame at the end.
    """

    def __init__(self, searcher: SearchAlgorithm, *, interval: int = 1, after_first_step: bool = False):
        """`__init__(...)`: Initialize the PandasLogger.

        Args:
            searcher: The evolutionary algorithm instance whose progress
                is to be logged.
            interval: An integer n; logging happens once every n iterations.
            after_first_step: Meaningful only when interval is greater
                than 1. With interval=10 and after_first_step=False (the
                default), logging happens at steps 10, 20, 30, and so on;
                with after_first_step=True it happens at steps 1, 11, 21,
                31, and so on.
        """
        super().__init__(searcher, interval=interval, after_first_step=after_first_step)
        # One deep-copied status dict per logged iteration.
        self._data = []

    def _log(self, status: dict):
        # Deep-copy so later mutations of the status do not alter history.
        self._data.append(deepcopy(status))

    def to_dataframe(self, *, index: Optional[str] = "iter") -> pandas.DataFrame:
        """Generate a pandas.DataFrame from the collected
        status information.

        Args:
            index: The column to be set as the index.
                If passed as None, then no index will be set.
                The default is "iter".
        """
        frame = pandas.DataFrame(self._data)
        if index is None:
            return frame
        return frame.set_index(index)

__init__(self, searcher, *, interval=1, after_first_step=False) special

__init__(...): Initialize the PandasLogger.

Parameters:

Name Type Description Default
searcher SearchAlgorithm

The evolutionary algorithm instance whose progress is to be logged.

required
interval int

Expected as an integer n. Logging is to be done at every n iterations.

1
after_first_step bool

Expected as a boolean. Meaningful only if interval is set as an integer greater than 1. Let us suppose that interval is set as 10. If after_first_step is False (which is the default), then the logging will be done at steps 10, 20, 30, and so on. On the other hand, if after_first_step is True, then the logging will be done at steps 1, 11, 21, 31, and so on.

False
Source code in evotorch/logging.py
def __init__(self, searcher: SearchAlgorithm, *, interval: int = 1, after_first_step: bool = False):
    """`__init__(...)`: Initialize the PandasLogger.

    Args:
        searcher: The evolutionary algorithm instance whose progress
            is to be logged.
        interval: An integer n; logging happens once every n iterations.
        after_first_step: Meaningful only when interval is greater
            than 1. With interval=10 and after_first_step=False (the
            default), logging happens at steps 10, 20, 30, and so on;
            with after_first_step=True it happens at steps 1, 11, 21,
            31, and so on.
    """
    super().__init__(searcher, interval=interval, after_first_step=after_first_step)
    # One deep-copied status dict per logged iteration.
    self._data = list()

to_dataframe(self, *, index='iter')

Generate a pandas.DataFrame from the collected status information.

Parameters:

Name Type Description Default
index Optional[str]

The column to be set as the index. If passed as None, then no index will be set. The default is "iter".

'iter'
Source code in evotorch/logging.py
def to_dataframe(self, *, index: Optional[str] = "iter") -> pandas.DataFrame:
    """Generate a pandas.DataFrame from the collected
    status information.

    Args:
        index: The column to be set as the index.
            If passed as None, then no index will be set.
            The default is "iter".
    """
    frame = pandas.DataFrame(self._data)
    if index is None:
        return frame
    return frame.set_index(index)

SacredLogger (ScalarLogger)

A logger which stores the status via the Run object of sacred.

Source code in evotorch/logging.py
class SacredLogger(ScalarLogger):
    """A logger which stores the status via the Run object of sacred."""

    def __init__(
        self,
        searcher: SearchAlgorithm,
        run: ExpOrRun,
        result: Optional[str] = None,
        *,
        interval: int = 1,
        after_first_step: bool = False,
    ):
        """`__init__(...)`: Initialize the SacredLogger.

        Args:
            searcher: The evolutionary algorithm instance whose progress
                is to be logged.
            run: An instance of `sacred.run.Run` or `sacred.Experiment`,
                using which the progress will be logged.
            result: The key in the status dictionary whose associated
                value will be registered as the current result of the
                experiment. If left as None, no result will be registered.
            interval: An integer n; logging happens once every n iterations.
            after_first_step: Meaningful only when interval is greater
                than 1. With interval=10 and after_first_step=False (the
                default), logging happens at steps 10, 20, 30, and so on;
                with after_first_step=True it happens at steps 1, 11, 21,
                31, and so on.
        """
        super().__init__(searcher, interval=interval, after_first_step=after_first_step)
        self._run = run
        self._result = result

    def _log(self, status: dict):
        for key, value in status.items():
            self._run.log_scalar(key, value)
        if self._result is None:
            return
        # Register the chosen status value as the experiment's result.
        self._run.result = status[self._result]

__init__(self, searcher, run, result=None, *, interval=1, after_first_step=False) special

__init__(...): Initialize the SacredLogger.

Parameters:

Name Type Description Default
searcher SearchAlgorithm

The evolutionary algorithm instance whose progress is to be logged.

required
run Union[sacred.experiment.Experiment, sacred.run.Run]

An instance of sacred.run.Run or sacred.Experiment, using which the progress will be logged.

required
result Optional[str]

The key in the status dictionary whose associated value will be registered as the current result of the experiment. If left as None, no result will be registered.

None
interval int

Expected as an integer n. Logging is to be done at every n iterations.

1
after_first_step bool

Expected as a boolean. Meaningful only if interval is set as an integer greater than 1. Let us suppose that interval is set as 10. If after_first_step is False (which is the default), then the logging will be done at steps 10, 20, 30, and so on. On the other hand, if after_first_step is True, then the logging will be done at steps 1, 11, 21, 31, and so on.

False
Source code in evotorch/logging.py
def __init__(
    self,
    searcher: SearchAlgorithm,
    run: ExpOrRun,
    result: Optional[str] = None,
    *,
    interval: int = 1,
    after_first_step: bool = False,
):
    """`__init__(...)`: Initialize the SacredLogger.

    Args:
        searcher: The evolutionary algorithm instance whose progress
            is to be logged.
        run: An instance of `sacred.run.Run` or `sacred.Experiment`,
            using which the progress will be logged.
        result: The key in the status dictionary whose associated
            value will be registered as the current result of the
            experiment. If left as None, no result will be registered.
        interval: An integer n; logging happens once every n iterations.
        after_first_step: Meaningful only when interval is greater
            than 1. With interval=10 and after_first_step=False (the
            default), logging happens at steps 10, 20, 30, and so on;
            with after_first_step=True it happens at steps 1, 11, 21,
            31, and so on.
    """
    super().__init__(searcher, interval=interval, after_first_step=after_first_step)
    self._run = run
    self._result = result

StdOutLogger (ScalarLogger)

A logger which prints the status to the screen.

Source code in evotorch/logging.py
class StdOutLogger(ScalarLogger):
    """A logger which prints the status to the screen."""

    def __init__(
        self,
        searcher: SearchAlgorithm,
        *,
        interval: int = 1,
        after_first_step: bool = False,
        leading_keys: Iterable[str] = ("iter",),
    ):
        """`__init__(...)`: Initialize the StdOutLogger.

        Args:
            searcher: The evolutionary algorithm instance whose progress
                is to be logged.
            interval: An integer n; logging happens once every n iterations.
            after_first_step: Meaningful only when interval is greater
                than 1. With interval=10 and after_first_step=False (the
                default), logging happens at steps 10, 20, 30, and so on;
                with after_first_step=True it happens at steps 1, 11, 21,
                31, and so on.
            leading_keys: A sequence of strings where each string is a status
                key. When printing the status, these keys will be shown first.
        """
        super().__init__(searcher, interval=interval, after_first_step=after_first_step)
        # Keep both the ordered list (print order) and a set (fast membership).
        self._leading_keys = list(leading_keys)
        self._leading_keys_set = set(self._leading_keys)

    def _log(self, status: dict):
        # Width of the widest key name, used to right-justify all keys.
        # `default=0` keeps an empty (e.g. fully filtered-out) status dict
        # from raising ValueError on max() of an empty sequence.
        max_key_length = max((len(str(k)) for k in status.keys()), default=0)

        def report(k, v):
            # `max_key_length` is read-only here, so no `nonlocal` is needed.
            print(str(k).rjust(max_key_length), ":", v)

        # Print the leading keys first, in the order they were given...
        for k in self._leading_keys:
            if k in status:
                report(k, status[k])
        # ...then every remaining key, in status order.
        for k, v in status.items():
            if k not in self._leading_keys_set:
                report(k, v)
        print()

__init__(self, searcher, *, interval=1, after_first_step=False, leading_keys=('iter',)) special

__init__(...): Initialize the StdOutLogger.

Parameters:

Name Type Description Default
searcher SearchAlgorithm

The evolutionary algorithm instance whose progress is to be logged.

required
interval int

Expected as an integer n. Logging is to be done at every n iterations.

1
after_first_step bool

Expected as a boolean. Meaningful only if interval is set as an integer greater than 1. Let us suppose that interval is set as 10. If after_first_step is False (which is the default), then the logging will be done at steps 10, 20, 30, and so on. On the other hand, if after_first_step is True, then the logging will be done at steps 1, 11, 21, 31, and so on.

False
leading_keys Iterable[str]

A sequence of strings where each string is a status key. When printing the status, these keys will be shown first.

('iter',)
Source code in evotorch/logging.py
def __init__(
    self,
    searcher: SearchAlgorithm,
    *,
    interval: int = 1,
    after_first_step: bool = False,
    leading_keys: Iterable[str] = ("iter",),
):
    """`__init__(...)`: Initialize the StdOutLogger.

    Args:
        searcher: The evolutionary algorithm instance whose progress
            is to be logged.
        interval: An integer n; logging happens once every n iterations.
        after_first_step: Meaningful only when interval is greater
            than 1. With interval=10 and after_first_step=False (the
            default), logging happens at steps 10, 20, 30, and so on;
            with after_first_step=True it happens at steps 1, 11, 21,
            31, and so on.
        leading_keys: A sequence of strings where each string is a status
            key. When printing the status, these keys will be shown first.
    """
    super().__init__(searcher, interval=interval, after_first_step=after_first_step)
    # Keep both the ordered list (print order) and a set (fast membership).
    ordered = list(leading_keys)
    self._leading_keys = ordered
    self._leading_keys_set = set(ordered)