run

openml.runs.run #

OpenMLRun #

OpenMLRun(task_id: int, flow_id: int | None, dataset_id: int | None, setup_string: str | None = None, output_files: dict[str, int] | None = None, setup_id: int | None = None, tags: list[str] | None = None, uploader: int | None = None, uploader_name: str | None = None, evaluations: dict | None = None, fold_evaluations: dict | None = None, sample_evaluations: dict | None = None, data_content: list[list] | None = None, trace: OpenMLRunTrace | None = None, model: object | None = None, task_type: str | None = None, task_evaluation_measure: str | None = None, flow_name: str | None = None, parameter_settings: list[dict[str, Any]] | None = None, predictions_url: str | None = None, task: OpenMLTask | None = None, flow: OpenMLFlow | None = None, run_id: int | None = None, description_text: str | None = None, run_details: str | None = None)

Bases: OpenMLBase

OpenML Run: result of running a model on an OpenML dataset.

PARAMETER DESCRIPTION
task_id

The ID of the OpenML task associated with the run.

TYPE: int

flow_id

The ID of the OpenML flow associated with the run.

TYPE: int | None

dataset_id

The ID of the OpenML dataset used for the run.

TYPE: int | None

setup_string

The setup string of the run.

TYPE: str | None DEFAULT: None

output_files

Specifies where each related file can be found.

TYPE: dict[str, int] | None DEFAULT: None

setup_id

The ID of the setup used for the run.

TYPE: int | None DEFAULT: None

tags

The tags associated with the run.

TYPE: list[str] | None DEFAULT: None

uploader

User ID of the uploader.

TYPE: int | None DEFAULT: None

uploader_name

The name of the person who uploaded the run.

TYPE: str | None DEFAULT: None

evaluations

The evaluations of the run.

TYPE: dict | None DEFAULT: None

fold_evaluations

The evaluations of the run for each fold.

TYPE: dict | None DEFAULT: None

sample_evaluations

The evaluations of the run for each sample.

TYPE: dict | None DEFAULT: None

data_content

The predictions generated from executing this run.

TYPE: list[list] | None DEFAULT: None

trace

The trace containing information on internal model evaluations of this run.

TYPE: OpenMLRunTrace | None DEFAULT: None

model

The untrained model that was evaluated in the run.

TYPE: object | None DEFAULT: None

task_type

The type of the OpenML task associated with the run.

TYPE: str | None DEFAULT: None

task_evaluation_measure

The evaluation measure used for the task.

TYPE: str | None DEFAULT: None

flow_name

The name of the OpenML flow associated with the run.

TYPE: str | None DEFAULT: None

parameter_settings

The parameter settings used for the run.

TYPE: list[dict[str, Any]] | None DEFAULT: None

predictions_url

The URL of the predictions file.

TYPE: str | None DEFAULT: None

task

An instance of the OpenMLTask class, representing the OpenML task associated with the run.

TYPE: OpenMLTask | None DEFAULT: None

flow

An instance of the OpenMLFlow class, representing the OpenML flow associated with the run.

TYPE: OpenMLFlow | None DEFAULT: None

run_id

The ID of the run.

TYPE: int | None DEFAULT: None

description_text

Description text to add to the predictions file. If left None, it is set to the time the ARFF file is generated.

TYPE: str | None DEFAULT: None

run_details

Description of the run stored in the run meta-data.

TYPE: str | None DEFAULT: None
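
In practice an OpenMLRun is rarely constructed by hand; runs are usually fetched from the server or produced by the runner functions. A minimal sketch (the run ID is an arbitrary example, and the public OpenML server is assumed):

import openml

# Fetch an existing run from the server and inspect a few attributes.
run = openml.runs.get_run(2)  # arbitrary example run ID
print(run.task_id, run.flow_name, run.openml_url)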

Source code in openml/runs/run.py
def __init__(  # noqa: PLR0913
    self,
    task_id: int,
    flow_id: int | None,
    dataset_id: int | None,
    setup_string: str | None = None,
    output_files: dict[str, int] | None = None,
    setup_id: int | None = None,
    tags: list[str] | None = None,
    uploader: int | None = None,
    uploader_name: str | None = None,
    evaluations: dict | None = None,
    fold_evaluations: dict | None = None,
    sample_evaluations: dict | None = None,
    data_content: list[list] | None = None,
    trace: OpenMLRunTrace | None = None,
    model: object | None = None,
    task_type: str | None = None,
    task_evaluation_measure: str | None = None,
    flow_name: str | None = None,
    parameter_settings: list[dict[str, Any]] | None = None,
    predictions_url: str | None = None,
    task: OpenMLTask | None = None,
    flow: OpenMLFlow | None = None,
    run_id: int | None = None,
    description_text: str | None = None,
    run_details: str | None = None,
):
    self.uploader = uploader
    self.uploader_name = uploader_name
    self.task_id = task_id
    self.task_type = task_type
    self.task_evaluation_measure = task_evaluation_measure
    self.flow_id = flow_id
    self.flow_name = flow_name
    self.setup_id = setup_id
    self.setup_string = setup_string
    self.parameter_settings = parameter_settings
    self.dataset_id = dataset_id
    self.evaluations = evaluations
    self.fold_evaluations = fold_evaluations
    self.sample_evaluations = sample_evaluations
    self.data_content = data_content
    self.output_files = output_files
    self.trace = trace
    self.error_message = None
    self.task = task
    self.flow = flow
    self.run_id = run_id
    self.model = model
    self.tags = tags
    self.predictions_url = predictions_url
    self.description_text = description_text
    self.run_details = run_details
    self._predictions = None

id property #

id: int | None

The ID of the run, None if not uploaded to the server yet.

openml_url property #

openml_url: str | None

The URL of the object on the server, if it was uploaded, else None.

predictions property #

predictions: DataFrame

Return a DataFrame with predictions for this run.
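
For a run downloaded from the server, the predictions are typically loaded from the predictions file on first access; for a locally executed run they come from data_content. A short sketch (the run ID is an arbitrary example):

import openml

run = openml.runs.get_run(2)  # arbitrary example run ID
df = run.predictions          # pandas DataFrame, one row per prediction
print(df.head())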

from_filesystem classmethod #

from_filesystem(directory: str | Path, expect_model: bool = True) -> OpenMLRun

The inverse of the to_filesystem method. Instantiates an OpenMLRun object based on files stored on the file system.

PARAMETER DESCRIPTION
directory

a path leading to the folder where the results are stored

TYPE: str | Path

expect_model

if True, the model pickle must be present, and an error is raised if it is missing. Otherwise, the model may or may not be present.

TYPE: bool DEFAULT: True

RETURNS DESCRIPTION
run

the re-instantiated run object

TYPE: OpenMLRun

Source code in openml/runs/run.py
@classmethod
def from_filesystem(cls, directory: str | Path, expect_model: bool = True) -> OpenMLRun:  # noqa: FBT002
    """
    The inverse of the to_filesystem method. Instantiates an OpenMLRun
    object based on files stored on the file system.

    Parameters
    ----------
    directory : str
        a path leading to the folder where the results
        are stored

    expect_model : bool
        if True, it requires the model pickle to be present, and an error
        will be thrown if not. Otherwise, the model might or might not
        be present.

    Returns
    -------
    run : OpenMLRun
        the re-instantiated run object
    """
    # Avoiding cyclic imports
    import openml.runs.functions

    directory = Path(directory)
    if not directory.is_dir():
        raise ValueError("Could not find folder")

    description_path = directory / "description.xml"
    predictions_path = directory / "predictions.arff"
    trace_path = directory / "trace.arff"
    model_path = directory / "model.pkl"

    if not description_path.is_file():
        raise ValueError("Could not find description.xml")
    if not predictions_path.is_file():
        raise ValueError("Could not find predictions.arff")
    if (not model_path.is_file()) and expect_model:
        raise ValueError("Could not find model.pkl")

    with description_path.open() as fht:
        xml_string = fht.read()
    run = openml.runs.functions._create_run_from_xml(xml_string, from_server=False)

    if run.flow_id is None:
        flow = openml.flows.OpenMLFlow.from_filesystem(directory)
        run.flow = flow
        run.flow_name = flow.name

    with predictions_path.open() as fht:
        predictions = arff.load(fht)
        run.data_content = predictions["data"]

    if model_path.is_file():
        # note that it will load the model if the file exists, even if
        # expect_model is False
        with model_path.open("rb") as fhb:
            run.model = pickle.load(fhb)  # noqa: S301

    if trace_path.is_file():
        run.trace = openml.runs.OpenMLRunTrace._from_filesystem(trace_path)

    return run
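
A round trip through to_filesystem and from_filesystem might look as follows (a sketch: the task ID and directory name are arbitrary examples, and the run is first executed locally with run_model_on_task):

from sklearn import tree
import openml
from openml.runs import OpenMLRun

task = openml.tasks.get_task(32)  # arbitrary example task ID
run = openml.runs.run_model_on_task(tree.DecisionTreeClassifier(), task)

run.to_filesystem("my_run_dir")  # target directory must be empty

# Later, possibly on another machine:
restored = OpenMLRun.from_filesystem("my_run_dir", expect_model=True)
print(restored.flow_name)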

get_metric_fn #

get_metric_fn(sklearn_fn: Callable, kwargs: dict | None = None) -> ndarray

Calculates metric scores based on predicted values. Assumes the run has been executed locally (and contains run_data). Furthermore, it assumes that the 'correct' or 'truth' attribute is specified in the ARFF file (an optional field, but always present for openml-python runs).

PARAMETER DESCRIPTION
sklearn_fn

a sklearn metric function that accepts y_true, y_pred and **kwargs

TYPE: Callable

kwargs

kwargs for the function

TYPE: dict | None DEFAULT: None

RETURNS DESCRIPTION
scores

metric results

TYPE: ndarray of length num_folds * num_repeats

Source code in openml/runs/run.py
def get_metric_fn(self, sklearn_fn: Callable, kwargs: dict | None = None) -> np.ndarray:  # noqa: PLR0915, PLR0912, C901
    """Calculates metric scores based on predicted values. Assumes the
    run has been executed locally (and contains run_data). Furthermore,
    it assumes that the 'correct' or 'truth' attribute is specified in
    the arff (which is an optional field, but always the case for
    openml-python runs)

    Parameters
    ----------
    sklearn_fn : function
        a function pointer to a sklearn function that
        accepts ``y_true``, ``y_pred`` and ``**kwargs``
    kwargs : dict
        kwargs for the function

    Returns
    -------
    scores : ndarray of scores of length num_folds * num_repeats
        metric results
    """
    kwargs = kwargs if kwargs else {}
    if self.data_content is not None and self.task_id is not None:
        predictions_arff = self._generate_arff_dict()
    elif (self.output_files is not None) and ("predictions" in self.output_files):
        predictions_file_url = openml._api_calls._file_id_to_url(
            self.output_files["predictions"],
            "predictions.arff",
        )
        response = openml._api_calls._download_text_file(predictions_file_url)
        predictions_arff = arff.loads(response)
        # TODO: make this a stream reader
    else:
        raise ValueError(
            "Run should have been locally executed or contain outputfile reference.",
        )

    # Need to know more about the task to compute scores correctly
    task = get_task(self.task_id)

    attribute_names = [att[0] for att in predictions_arff["attributes"]]
    if (
        task.task_type_id in [TaskType.SUPERVISED_CLASSIFICATION, TaskType.LEARNING_CURVE]
        and "correct" not in attribute_names
    ):
        raise ValueError('Attribute "correct" should be set for classification task runs')
    if task.task_type_id == TaskType.SUPERVISED_REGRESSION and "truth" not in attribute_names:
        raise ValueError('Attribute "truth" should be set for regression task runs')
    if task.task_type_id != TaskType.CLUSTERING and "prediction" not in attribute_names:
        raise ValueError('Attribute "prediction" should be set for supervised task runs')

    def _attribute_list_to_dict(attribute_list):  # type: ignore
        # convenience function: Creates a mapping to map from the name of
        # attributes present in the arff prediction file to their index.
        # This is necessary because the number of classes can be different
        # for different tasks.
        res = OrderedDict()
        for idx in range(len(attribute_list)):
            res[attribute_list[idx][0]] = idx
        return res

    attribute_dict = _attribute_list_to_dict(predictions_arff["attributes"])

    repeat_idx = attribute_dict["repeat"]
    fold_idx = attribute_dict["fold"]
    predicted_idx = attribute_dict["prediction"]  # Assume supervised task

    if task.task_type_id in (TaskType.SUPERVISED_CLASSIFICATION, TaskType.LEARNING_CURVE):
        correct_idx = attribute_dict["correct"]
    elif task.task_type_id == TaskType.SUPERVISED_REGRESSION:
        correct_idx = attribute_dict["truth"]
    has_samples = False
    if "sample" in attribute_dict:
        sample_idx = attribute_dict["sample"]
        has_samples = True

    if (
        predictions_arff["attributes"][predicted_idx][1]
        != predictions_arff["attributes"][correct_idx][1]
    ):
        pred = predictions_arff["attributes"][predicted_idx][1]
        corr = predictions_arff["attributes"][correct_idx][1]
        raise ValueError(
            f"Predicted and Correct do not have equal values: {pred!s} Vs. {corr!s}",
        )

    # TODO: these could be cached
    values_predict: dict[int, dict[int, dict[int, list[float]]]] = {}
    values_correct: dict[int, dict[int, dict[int, list[float]]]] = {}
    for _line_idx, line in enumerate(predictions_arff["data"]):
        rep = line[repeat_idx]
        fold = line[fold_idx]
        samp = line[sample_idx] if has_samples else 0

        if task.task_type_id in [
            TaskType.SUPERVISED_CLASSIFICATION,
            TaskType.LEARNING_CURVE,
        ]:
            prediction = predictions_arff["attributes"][predicted_idx][1].index(
                line[predicted_idx],
            )
            correct = predictions_arff["attributes"][predicted_idx][1].index(line[correct_idx])
        elif task.task_type_id == TaskType.SUPERVISED_REGRESSION:
            prediction = line[predicted_idx]
            correct = line[correct_idx]
        if rep not in values_predict:
            values_predict[rep] = OrderedDict()
            values_correct[rep] = OrderedDict()
        if fold not in values_predict[rep]:
            values_predict[rep][fold] = OrderedDict()
            values_correct[rep][fold] = OrderedDict()
        if samp not in values_predict[rep][fold]:
            values_predict[rep][fold][samp] = []
            values_correct[rep][fold][samp] = []

        values_predict[rep][fold][samp].append(prediction)
        values_correct[rep][fold][samp].append(correct)

    scores = []
    for rep in values_predict:  # noqa: PLC0206
        for fold in values_predict[rep]:
            last_sample = len(values_predict[rep][fold]) - 1
            y_pred = values_predict[rep][fold][last_sample]
            y_true = values_correct[rep][fold][last_sample]
            scores.append(sklearn_fn(y_true, y_pred, **kwargs))
    return np.array(scores)
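
For example, computing per-fold accuracy for a locally executed classification run (a sketch; the task ID is an arbitrary example):

from sklearn import metrics, tree
import openml

task = openml.tasks.get_task(32)  # arbitrary example task ID
run = openml.runs.run_model_on_task(tree.DecisionTreeClassifier(), task)

# One score per repeat/fold combination.
scores = run.get_metric_fn(metrics.accuracy_score)
print(scores.mean(), scores.std())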

open_in_browser #

open_in_browser() -> None

Opens the OpenML web page corresponding to this object in your default browser.

Source code in openml/base.py
def open_in_browser(self) -> None:
    """Opens the OpenML web page corresponding to this object in your default browser."""
    if self.openml_url is None:
        raise ValueError(
            "Cannot open element on OpenML.org when attribute `openml_url` is `None`",
        )

    webbrowser.open(self.openml_url)

publish #

publish() -> OpenMLBase

Publish the object on the OpenML server.

Source code in openml/base.py
def publish(self) -> OpenMLBase:
    """Publish the object on the OpenML server."""
    file_elements = self._get_file_elements()

    if "description" not in file_elements:
        file_elements["description"] = self._to_xml()

    call = f"{_get_rest_api_type_alias(self)}/"
    response_text = openml._api_calls._perform_api_call(
        call,
        "post",
        file_elements=file_elements,
    )
    xml_response = xmltodict.parse(response_text)

    self._parse_publish_response(xml_response)
    return self
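
Publishing a locally executed run might look like this (a sketch; assumes a valid API key has been configured, and the task ID is an arbitrary example):

from sklearn import tree
import openml

openml.config.apikey = "..."  # placeholder: your OpenML API key

task = openml.tasks.get_task(32)  # arbitrary example task ID
run = openml.runs.run_model_on_task(tree.DecisionTreeClassifier(), task)
run = run.publish()
print(run.run_id)  # ID assigned by the server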

push_tag #

push_tag(tag: str) -> None

Annotates this entity with a tag on the server.

PARAMETER DESCRIPTION
tag

Tag to attach to the entity.

TYPE: str

Source code in openml/base.py
def push_tag(self, tag: str) -> None:
    """Annotates this entity with a tag on the server.

    Parameters
    ----------
    tag : str
        Tag to attach to the entity.
    """
    _tag_openml_base(self, tag)

remove_tag #

remove_tag(tag: str) -> None

Removes a tag from this entity on the server.

PARAMETER DESCRIPTION
tag

Tag to remove from the entity.

TYPE: str

Source code in openml/base.py
def remove_tag(self, tag: str) -> None:
    """Removes a tag from this entity on the server.

    Parameters
    ----------
    tag : str
        Tag to remove from the entity.
    """
    _tag_openml_base(self, tag, untag=True)
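
Tagging and untagging a run might look like this (a sketch; assumes you are authenticated and allowed to tag the entity, and the run ID and tag name are arbitrary examples):

import openml

run = openml.runs.get_run(2)  # arbitrary example run ID
run.push_tag("my_experiment")
run.remove_tag("my_experiment")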

to_filesystem #

to_filesystem(directory: str | Path, store_model: bool = True) -> None

The inverse of the from_filesystem method. Serializes a run to the filesystem so it can be uploaded later.

PARAMETER DESCRIPTION
directory

a path leading to the folder where the results will be stored. Should be empty.

TYPE: str | Path

store_model

if True, the model is pickled as well. As this is the most storage-expensive part, it is often desirable not to store the model.

TYPE: bool DEFAULT: True

Source code in openml/runs/run.py
def to_filesystem(
    self,
    directory: str | Path,
    store_model: bool = True,  # noqa: FBT002
) -> None:
    """
    The inverse of the from_filesystem method. Serializes a run
    on the filesystem, to be uploaded later.

    Parameters
    ----------
    directory : str
        a path leading to the folder where the results
        will be stored. Should be empty

    store_model : bool, optional (default=True)
        if True, a model will be pickled as well. As this is the most
        storage expensive part, it is often desirable to not store the
        model.
    """
    if self.data_content is None or self.model is None:
        raise ValueError("Run should have been executed (and contain model / predictions)")
    directory = Path(directory)
    directory.mkdir(exist_ok=True, parents=True)

    if any(directory.iterdir()):
        raise ValueError(f"Output directory {directory.expanduser().resolve()} should be empty")

    run_xml = self._to_xml()
    predictions_arff = arff.dumps(self._generate_arff_dict())

    # It seems like typing does not allow to define the same variable multiple times
    with (directory / "description.xml").open("w") as fh:
        fh.write(run_xml)
    with (directory / "predictions.arff").open("w") as fh:
        fh.write(predictions_arff)
    if store_model:
        with (directory / "model.pkl").open("wb") as fh_b:
            pickle.dump(self.model, fh_b)

    if self.flow_id is None and self.flow is not None:
        self.flow.to_filesystem(directory)

    if self.trace is not None:
        self.trace._to_filesystem(directory)

url_for_id classmethod #

url_for_id(id_: int) -> str

Return the OpenML URL for the entity of this class with the given id.

Source code in openml/base.py
@classmethod
def url_for_id(cls, id_: int) -> str:
    """Return the OpenML URL for the object of the class entity with the given id."""
    # Sample url for a flow: openml.org/f/123
    return f"{openml.config.get_server_base_url()}/{cls._entity_letter()}/{id_}"