
openml.study.functions #

__list_studies #

__list_studies(api_call: str) -> DataFrame

Retrieves the list of OpenML studies and returns it as a Pandas DataFrame.

Parameters#

api_call : str The API call for retrieving the list of OpenML studies.

Returns#

pd.DataFrame A Pandas DataFrame of OpenML studies

Source code in openml/study/functions.py
def __list_studies(api_call: str) -> pd.DataFrame:
    """Retrieves the list of OpenML studies and
    returns it in a dictionary or a Pandas DataFrame.

    Parameters
    ----------
    api_call : str
        The API call for retrieving the list of OpenML studies.

    Returns
    -------
    pd.DataFrame
        A Pandas DataFrame of OpenML studies
    """
    xml_string = openml._api_calls._perform_api_call(api_call, "get")
    study_dict = xmltodict.parse(xml_string, force_list=("oml:study",))

    # Minimalistic check if the XML is useful
    assert isinstance(study_dict["oml:study_list"]["oml:study"], list), type(
        study_dict["oml:study_list"],
    )
    assert study_dict["oml:study_list"]["@xmlns:oml"] == "http://openml.org/openml", study_dict[
        "oml:study_list"
    ]["@xmlns:oml"]

    studies = {}
    for study_ in study_dict["oml:study_list"]["oml:study"]:
        # maps from xml name to a tuple of (dict name, casting fn)
        expected_fields = {
            "oml:id": ("id", int),
            "oml:alias": ("alias", str),
            "oml:main_entity_type": ("main_entity_type", str),
            "oml:benchmark_suite": ("benchmark_suite", int),
            "oml:name": ("name", str),
            "oml:status": ("status", str),
            "oml:creation_date": ("creation_date", str),
            "oml:creator": ("creator", int),
        }
        study_id = int(study_["oml:id"])
        current_study = {}
        for oml_field_name, (real_field_name, cast_fn) in expected_fields.items():
            if oml_field_name in study_:
                current_study[real_field_name] = cast_fn(study_[oml_field_name])
        current_study["id"] = int(current_study["id"])
        studies[study_id] = current_study

    return pd.DataFrame.from_dict(studies, orient="index")

attach_to_study #

attach_to_study(study_id: int, run_ids: list[int]) -> int

Attaches a set of runs to a study.

Parameters#

study_id : int OpenML id of the study

run_ids : list (int) List of entities to link to the collection

Returns#

int new size of the study (in terms of explicitly linked entities)

Source code in openml/study/functions.py
def attach_to_study(study_id: int, run_ids: list[int]) -> int:
    """Attaches a set of runs to a study.

    Parameters
    ----------
    study_id : int
        OpenML id of the study

    run_ids : list (int)
        List of entities to link to the collection

    Returns
    -------
    int
        new size of the study (in terms of explicitly linked entities)
    """
    # Interestingly, there's no need to tell the server about the entity type, it knows by itself
    result_xml = openml._api_calls._perform_api_call(
        call=f"study/{study_id}/attach",
        request_method="post",
        data={"ids": ",".join(str(x) for x in run_ids)},
    )
    result = xmltodict.parse(result_xml)["oml:study_attach"]
    return int(result["oml:linked_entities"])
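
Below is a minimal usage sketch, not part of the library source; the study id and run ids are placeholders and assume you own an active study on the server and have an API key configured.

from openml.study.functions import attach_to_study

# Hypothetical ids: 123 is a study you own, 1000-1002 are runs to link to it.
new_size = attach_to_study(study_id=123, run_ids=[1000, 1001, 1002])
print(f"The study now explicitly links {new_size} runs.")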

attach_to_suite #

attach_to_suite(suite_id: int, task_ids: list[int]) -> int

Attaches a set of tasks to a benchmarking suite.

Parameters#

suite_id : int OpenML id of the suite

task_ids : list (int) List of entities to link to the collection

Returns#

int new size of the suite (in terms of explicitly linked entities)

Source code in openml/study/functions.py
def attach_to_suite(suite_id: int, task_ids: list[int]) -> int:
    """Attaches a set of tasks to a benchmarking suite.

    Parameters
    ----------
    suite_id : int
        OpenML id of the suite

    task_ids : list (int)
        List of entities to link to the collection

    Returns
    -------
    int
        new size of the suite (in terms of explicitly linked entities)
    """
    return attach_to_study(suite_id, task_ids)
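
Since attach_to_suite simply forwards to attach_to_study, usage mirrors the sketch above; the suite id and task ids below are placeholders.

from openml.study.functions import attach_to_suite

# Hypothetical ids: 42 is a suite you own, 31 and 53 are tasks to link to it.
new_size = attach_to_suite(suite_id=42, task_ids=[31, 53])
print(f"The suite now explicitly links {new_size} tasks.")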

create_benchmark_suite #

create_benchmark_suite(name: str, description: str, task_ids: list[int], alias: str | None = None) -> OpenMLBenchmarkSuite

Creates an OpenML benchmark suite (a collection in which tasks are the linked entities).

Parameters#

name : str the name of the suite (meta-info)

description : str brief description (meta-info)

task_ids : list a list of task ids associated with this suite; more can be added later with attach_to_suite.

alias : str (optional) a string ID, unique on server (url-friendly)

Returns#

OpenMLBenchmarkSuite A local OpenML benchmark suite object (call publish method to upload to server)

Source code in openml/study/functions.py
def create_benchmark_suite(
    name: str,
    description: str,
    task_ids: list[int],
    alias: str | None = None,
) -> OpenMLBenchmarkSuite:
    """
    Creates an OpenML benchmark suite (collection of entity types, where
    the tasks are the linked entity)

    Parameters
    ----------
    name : str
        the name of the suite (meta-info)
    description : str
        brief description (meta-info)
    task_ids : list
        a list of task ids associated with this suite;
        more can be added later with ``attach_to_suite``.
    alias : str (optional)
        a string ID, unique on server (url-friendly)

    Returns
    -------
    OpenMLBenchmarkSuite
        A local OpenML benchmark suite object (call publish method to upload to server)
    """
    return OpenMLBenchmarkSuite(
        suite_id=None,
        alias=alias,
        name=name,
        description=description,
        status=None,
        creation_date=None,
        creator=None,
        tags=None,
        data=None,
        tasks=task_ids,
    )
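
A hedged sketch of creating a suite locally and uploading it; the name, description, task ids and alias are placeholders, and publish() is the upload method mentioned in the docstring (it requires a configured API key).

from openml.study.functions import create_benchmark_suite

# Placeholder metadata and task ids; nothing is sent to the server until publish() is called.
suite = create_benchmark_suite(
    name="Example suite",
    description="A small suite used purely for illustration.",
    task_ids=[3, 6, 11],
    alias="example-suite-alias",  # optional, must be unique on the server
)
suite.publish()  # uploads the suite and assigns it a server-side id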

create_study #

create_study(name: str, description: str, run_ids: list[int] | None = None, alias: str | None = None, benchmark_suite: int | None = None) -> OpenMLStudy

Creates an OpenML study (a collection of data, tasks, flows, setups and runs), where the runs are the main entity (the collection consists of runs and all entities (flows, tasks, etc.) that are related to these runs).

Parameters#

name : str the name of the study (meta-info)

description : str brief description (meta-info)

run_ids : list, optional a list of run ids associated with this study; these can also be added later with attach_to_study.

alias : str (optional) a string ID, unique on server (url-friendly)

benchmark_suite : int (optional) the ID of the benchmark suite (another study) upon which this study is run and for which it contains run results

Returns#

OpenMLStudy A local OpenML study object (call publish method to upload to server)

Source code in openml/study/functions.py
def create_study(
    name: str,
    description: str,
    run_ids: list[int] | None = None,
    alias: str | None = None,
    benchmark_suite: int | None = None,
) -> OpenMLStudy:
    """
    Creates an OpenML study (collection of data, tasks, flows, setups and runs),
    where the runs are the main entity (collection consists of runs and all
    entities (flows, tasks, etc.) that are related to these runs)

    Parameters
    ----------
    name : str
        the name of the study (meta-info)
    description : str
        brief description (meta-info)
    run_ids : list, optional
        a list of run ids associated with this study,
        these can also be added later with ``attach_to_study``.
    alias : str (optional)
        a string ID, unique on server (url-friendly)
    benchmark_suite : int (optional)
        the ID of the benchmark suite (another study) upon which this study is run
        and for which it contains run results

    Returns
    -------
    OpenMLStudy
        A local OpenML study object (call publish method to upload to server)
    """
    return OpenMLStudy(
        study_id=None,
        alias=alias,
        benchmark_suite=benchmark_suite,
        name=name,
        description=description,
        status=None,
        creation_date=None,
        creator=None,
        tags=None,
        data=None,
        tasks=None,
        flows=None,
        runs=run_ids if run_ids != [] else None,
        setups=None,
    )
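
A similar sketch for studies; the metadata and run ids are placeholders, and publish() again performs the upload.

from openml.study.functions import create_study

# Placeholder metadata and run ids; the study stays local until publish() is called.
study = create_study(
    name="Example study",
    description="Collects the runs of an illustrative experiment.",
    run_ids=[500, 501],    # optional; runs can also be linked later via attach_to_study
    benchmark_suite=None,  # optionally, the id of the suite the runs were produced on
)
study.publish()  # uploads the study; requires a configured API key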

delete_study #

delete_study(study_id: int) -> bool

Deletes a study from the OpenML server.

Parameters#

study_id : int OpenML id of the study

Returns#

bool True iff the deletion was successful. False otherwise

Source code in openml/study/functions.py
def delete_study(study_id: int) -> bool:
    """Deletes a study from the OpenML server.

    Parameters
    ----------
    study_id : int
        OpenML id of the study

    Returns
    -------
    bool
        True iff the deletion was successful. False otherwise
    """
    return openml.utils._delete_entity("study", study_id)

delete_suite #

delete_suite(suite_id: int) -> bool

Deletes a benchmarking suite from the OpenML server.

Parameters#

suite_id : int OpenML id of the suite

Returns#

bool True iff the deletion was successful. False otherwise

Source code in openml/study/functions.py
def delete_suite(suite_id: int) -> bool:
    """Deletes a study from the OpenML server.

    Parameters
    ----------
    suite_id : int
        OpenML id of the suite

    Returns
    -------
    bool
        True iff the deletion was successful. False otherwise
    """
    return delete_study(suite_id)

detach_from_study #

detach_from_study(study_id: int, run_ids: list[int]) -> int

Detaches a set of run ids from a study.

Parameters#

study_id : int OpenML id of the study

run_ids : list (int) List of entities to unlink from the collection

Returns#

int new size of the study (in terms of explicitly linked entities)

Source code in openml/study/functions.py
def detach_from_study(study_id: int, run_ids: list[int]) -> int:
    """Detaches a set of run ids from a study.

    Parameters
    ----------
    study_id : int
        OpenML id of the study

    run_ids : list (int)
        List of entities to unlink from the collection

    Returns
    -------
    int
        new size of the study (in terms of explicitly linked entities)
    """
    # Interestingly, there's no need to tell the server about the entity type, it knows by itself
    uri = "study/%d/detach" % study_id
    post_variables = {"ids": ",".join(str(x) for x in run_ids)}  # type: openml._api_calls.DATA_TYPE
    result_xml = openml._api_calls._perform_api_call(
        call=uri,
        request_method="post",
        data=post_variables,
    )
    result = xmltodict.parse(result_xml)["oml:study_detach"]
    return int(result["oml:linked_entities"])
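
A short sketch; the ids are placeholders for a study you own and runs currently linked to it.

from openml.study.functions import detach_from_study

# Unlink two runs from the study; the return value is the new number of linked entities.
remaining = detach_from_study(study_id=123, run_ids=[1000, 1001])
print(f"{remaining} runs remain explicitly linked to the study.")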

detach_from_suite #

detach_from_suite(suite_id: int, task_ids: list[int]) -> int

Detaches a set of task ids from a suite.

Parameters#

suite_id : int OpenML id of the suite

task_ids : list (int) List of entities to unlink from the collection

Returns#

int new size of the suite (in terms of explicitly linked entities)

Source code in openml/study/functions.py
def detach_from_suite(suite_id: int, task_ids: list[int]) -> int:
    """Detaches a set of task ids from a suite.

    Parameters
    ----------
    suite_id : int
        OpenML id of the suite

    task_ids : list (int)
        List of entities to unlink from the collection

    Returns
    -------
    int
        new size of the suite (in terms of explicitly linked entities)
    """
    return detach_from_study(suite_id, task_ids)

get_study #

get_study(study_id: int | str, arg_for_backwards_compat: str | None = None) -> OpenMLStudy

Retrieves all relevant information of an OpenML study from the server.

Parameters#

study_id : int, str study id (numeric or alias)

arg_for_backwards_compat : str, optional The example given in https://arxiv.org/pdf/1708.03731.pdf uses an older version of the API which required specifying the type of study, i.e. tasks. We changed the implementation of studies since then and split them up into suites (collections of tasks) and studies (collections of runs), so this argument is no longer needed.

Returns#

OpenMLStudy The OpenML study object

Source code in openml/study/functions.py
def get_study(
    study_id: int | str,
    arg_for_backwards_compat: str | None = None,  # noqa: ARG001
) -> OpenMLStudy:  # F401
    """
    Retrieves all relevant information of an OpenML study from the server.

    Parameters
    ----------
    study_id : int, str
        study id (numeric or alias)

    arg_for_backwards_compat : str, optional
        The example given in https://arxiv.org/pdf/1708.03731.pdf uses an older version of the
        API which required specifying the type of study, i.e. tasks. We changed the
        implementation of studies since then and split them up into suites (collections of tasks)
        and studies (collections of runs) so this argument is no longer needed.

    Returns
    -------
    OpenMLStudy
        The OpenML study object
    """
    if study_id == "OpenML100":
        message = (
            "It looks like you are running code from the OpenML100 paper. It still works, but lots "
            "of things have changed since then. Please use `get_suite('OpenML100')` instead."
        )
        warnings.warn(message, DeprecationWarning, stacklevel=2)
        openml.config.logger.warning(message)
        study = _get_study(study_id, entity_type="task")
        assert isinstance(study, OpenMLBenchmarkSuite)

        return study  # type: ignore

    study = _get_study(study_id, entity_type="run")
    assert isinstance(study, OpenMLStudy)
    return study
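
A small sketch of fetching a study by numeric id or alias; the id is a placeholder, and the attribute names are assumed to mirror the constructor parameters shown under create_study.

from openml.study.functions import get_study

study = get_study(123)        # numeric id or alias, e.g. get_study("my-study-alias")
print(study.name)
print(len(study.runs or []))  # run ids linked to the study (may be None for an empty study)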

get_suite #

get_suite(suite_id: int | str) -> OpenMLBenchmarkSuite

Retrieves all relevant information of an OpenML benchmarking suite from the server.

Parameters#

suite_id : int, str suite id (numeric or alias)

Returns#

OpenMLBenchmarkSuite The OpenML benchmark suite object

Source code in openml/study/functions.py
def get_suite(suite_id: int | str) -> OpenMLBenchmarkSuite:
    """
    Retrieves all relevant information of an OpenML benchmarking suite from the server.

    Parameters
    ----------
    suite_id : int, str
        suite id (numeric or alias)

    Returns
    -------
    OpenMLBenchmarkSuite
        The OpenML benchmark suite object
    """
    study = _get_study(suite_id, entity_type="task")
    assert isinstance(study, OpenMLBenchmarkSuite)

    return study
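
Analogously for suites; the id is a placeholder and the attributes are assumed to mirror the create_benchmark_suite parameters.

from openml.study.functions import get_suite

suite = get_suite(99)          # numeric id or alias
print(suite.name)
print(len(suite.tasks or []))  # task ids linked to the suite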

list_studies #

list_studies(offset: int | None = None, size: int | None = None, status: str | None = None, uploader: list[str] | None = None, benchmark_suite: int | None = None) -> DataFrame

Return a list of all studies which are on OpenML.

Parameters#

offset : int, optional The number of studies to skip, starting from the first.

size : int, optional The maximum number of studies to show.

status : str, optional Should be {active, in_preparation, deactivated, all}. By default active studies are returned.

uploader : list (int), optional Result filter. Will only return studies created by these users.

benchmark_suite : int, optional Result filter. Will only return studies associated with this benchmark suite.

Returns#

studies : dataframe Every study is represented by a row containing the following information:

- id
- alias (optional)
- name
- benchmark_suite (optional)
- status
- creator
- creation_date

Source code in openml/study/functions.py
def list_studies(
    offset: int | None = None,
    size: int | None = None,
    status: str | None = None,
    uploader: list[str] | None = None,
    benchmark_suite: int | None = None,
) -> pd.DataFrame:
    """
    Return a list of all studies which are on OpenML.

    Parameters
    ----------
    offset : int, optional
        The number of studies to skip, starting from the first.
    size : int, optional
        The maximum number of studies to show.
    status : str, optional
        Should be {active, in_preparation, deactivated, all}. By default active
        studies are returned.
    uploader : list (int), optional
        Result filter. Will only return studies created by these users.
    benchmark_suite : int, optional

    Returns
    -------
    studies : dataframe
        Every study is represented by a row containing
        the following information:
        - id
        - alias (optional)
        - name
        - benchmark_suite (optional)
        - status
        - creator
        - creation_date
    """
    listing_call = partial(
        _list_studies,
        main_entity_type="run",
        status=status,
        uploader=uploader,
        benchmark_suite=benchmark_suite,
    )
    batches = openml.utils._list_all(listing_call, offset=offset, limit=size)
    if len(batches) == 0:
        return pd.DataFrame()

    return pd.concat(batches)
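
A hedged sketch of listing studies with a size limit and status filter; column availability depends on what the server returns.

from openml.study.functions import list_studies

# Fetch up to 10 active studies as a pandas DataFrame.
df = list_studies(size=10, status="active")
print(df.head())  # columns include id, name, status, creator and creation_date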

list_suites #

list_suites(offset: int | None = None, size: int | None = None, status: str | None = None, uploader: list[int] | None = None) -> DataFrame

Return a list of all suites which are on OpenML.

Parameters#

offset : int, optional The number of suites to skip, starting from the first.

size : int, optional The maximum number of suites to show.

status : str, optional Should be {active, in_preparation, deactivated, all}. By default active suites are returned.

uploader : list (int), optional Result filter. Will only return suites created by these users.

Returns#

suites : dataframe Every suite is represented by a row containing the following information:

- id
- alias (optional)
- name
- main_entity_type
- status
- creator
- creation_date

Source code in openml/study/functions.py
def list_suites(
    offset: int | None = None,
    size: int | None = None,
    status: str | None = None,
    uploader: list[int] | None = None,
) -> pd.DataFrame:
    """
    Return a list of all suites which are on OpenML.

    Parameters
    ----------
    offset : int, optional
        The number of suites to skip, starting from the first.
    size : int, optional
        The maximum number of suites to show.
    status : str, optional
        Should be {active, in_preparation, deactivated, all}. By default active
        suites are returned.
    uploader : list (int), optional
        Result filter. Will only return suites created by these users.

    Returns
    -------
    suites : dataframe
        Every suite is represented by a row containing the following information:
        - id
        - alias (optional)
        - name
        - main_entity_type
        - status
        - creator
        - creation_date
    """
    listing_call = partial(
        _list_studies,
        main_entity_type="task",
        status=status,
        uploader=uploader,
    )
    batches = openml.utils._list_all(listing_call, limit=size, offset=offset)
    if len(batches) == 0:
        return pd.DataFrame()

    return pd.concat(batches)
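
And the corresponding sketch for suites, again with placeholder filter values.

from openml.study.functions import list_suites

df = list_suites(size=5, status="all")
print(df.head())  # columns include id, name, main_entity_type, status and creation_date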

update_study_status #

update_study_status(study_id: int, status: str) -> None

Updates the status of a study to either 'active' or 'deactivated'.

Parameters#

study_id : int The id of the study

status : str 'active' or 'deactivated'

Source code in openml/study/functions.py
def update_study_status(study_id: int, status: str) -> None:
    """
    Updates the status of a study to either 'active' or 'deactivated'.

    Parameters
    ----------
    study_id : int
        The id of the study
    status : str
        'active' or 'deactivated'
    """
    legal_status = {"active", "deactivated"}
    if status not in legal_status:
        raise ValueError(f"Illegal status value. Legal values: {legal_status}")
    data = {"study_id": study_id, "status": status}  # type: openml._api_calls.DATA_TYPE
    result_xml = openml._api_calls._perform_api_call("study/status/update", "post", data=data)
    result = xmltodict.parse(result_xml)
    server_study_id = result["oml:study_status_update"]["oml:id"]
    server_status = result["oml:study_status_update"]["oml:status"]
    if status != server_status or int(study_id) != int(server_study_id):
        # This should never happen
        raise ValueError("Study id/status returned by the server does not match the request")
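
A one-line sketch; the study id is a placeholder for a study you own.

from openml.study.functions import update_study_status

update_study_status(study_id=123, status="deactivated")  # or "active" to re-activate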

update_suite_status #

update_suite_status(suite_id: int, status: str) -> None

Updates the status of a suite to either 'active' or 'deactivated'.

Parameters#

suite_id : int The id of the suite

status : str 'active' or 'deactivated'

Source code in openml/study/functions.py
def update_suite_status(suite_id: int, status: str) -> None:
    """
    Updates the status of a suite to either 'active' or 'deactivated'.

    Parameters
    ----------
    suite_id : int
        The id of the suite
    status : str
        'active' or 'deactivated'
    """
    return update_study_status(suite_id, status)