

openml.datasets #

OpenMLDataFeature #

OpenMLDataFeature(index: int, name: str, data_type: str, nominal_values: list[str], number_missing_values: int, ontologies: list[str] | None = None)

Data Feature (a.k.a. Attribute) object.

Parameters#

index : int
    The index of this feature.
name : str
    Name of the feature.
data_type : str
    Can be nominal, numeric, string, or date (corresponds to ARFF).
nominal_values : list(str)
    List of the possible values, in case of a nominal attribute.
number_missing_values : int
    Number of rows that have a missing value for this feature.
ontologies : list(str)
    List of ontologies attached to this feature. An ontology describes the concepts
    that are described in a feature. An ontology is defined by a URL where the
    information is provided.

Source code in openml/datasets/data_feature.py
def __init__(  # noqa: PLR0913
    self,
    index: int,
    name: str,
    data_type: str,
    nominal_values: list[str],
    number_missing_values: int,
    ontologies: list[str] | None = None,
):
    if not isinstance(index, int):
        raise TypeError(f"Index must be `int` but is {type(index)}")

    if data_type not in self.LEGAL_DATA_TYPES:
        raise ValueError(
            f"data type should be in {self.LEGAL_DATA_TYPES!s}, found: {data_type}",
        )

    if data_type == "nominal":
        if nominal_values is None:
            raise TypeError(
                "Dataset features require attribute `nominal_values` for nominal "
                "feature type.",
            )

        if not isinstance(nominal_values, list):
            raise TypeError(
                "Argument `nominal_values` is of wrong datatype, should be list, "
                f"but is {type(nominal_values)}",
            )
    elif nominal_values is not None:
        raise TypeError("Argument `nominal_values` must be None for non-nominal feature.")

    if not isinstance(number_missing_values, int):
        msg = f"number_missing_values must be int but is {type(number_missing_values)}"
        raise TypeError(msg)

    self.index = index
    self.name = str(name)
    self.data_type = str(data_type)
    self.nominal_values = nominal_values
    self.number_missing_values = number_missing_values
    self.ontologies = ontologies
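
For illustration, here is a minimal usage sketch that constructs a feature object directly; in normal use these objects are created by the library when dataset metadata is loaded, and the values below are made up:

from openml.datasets import OpenMLDataFeature

# A hypothetical nominal feature at index 0 with no missing values.
feature = OpenMLDataFeature(
    index=0,
    name="outlook",
    data_type="nominal",
    nominal_values=["sunny", "overcast", "rainy"],
    number_missing_values=0,
)
print(feature.name, feature.data_type, feature.nominal_values)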

OpenMLDataset #

OpenMLDataset(name: str, description: str | None, data_format: Literal['arff', 'sparse_arff'] = 'arff', cache_format: Literal['feather', 'pickle'] = 'pickle', dataset_id: int | None = None, version: int | None = None, creator: str | None = None, contributor: str | None = None, collection_date: str | None = None, upload_date: str | None = None, language: str | None = None, licence: str | None = None, url: str | None = None, default_target_attribute: str | None = None, row_id_attribute: str | None = None, ignore_attribute: str | list[str] | None = None, version_label: str | None = None, citation: str | None = None, tag: str | None = None, visibility: str | None = None, original_data_url: str | None = None, paper_url: str | None = None, update_comment: str | None = None, md5_checksum: str | None = None, data_file: str | None = None, features_file: str | None = None, qualities_file: str | None = None, dataset: str | None = None, parquet_url: str | None = None, parquet_file: str | None = None)

Bases: OpenMLBase

Dataset object.

Allows fetching and uploading datasets to OpenML.

Parameters#

name : str
    Name of the dataset.
description : str
    Description of the dataset.
data_format : str
    Format of the dataset which can be either 'arff' or 'sparse_arff'.
cache_format : str
    Format for caching the dataset which can be either 'feather' or 'pickle'.
dataset_id : int, optional
    Id autogenerated by the server.
version : int, optional
    Version of this dataset. '1' for original version. Auto-incremented by server.
creator : str, optional
    The person who created the dataset.
contributor : str, optional
    People who contributed to the current version of the dataset.
collection_date : str, optional
    The date the data was originally collected, given by the uploader.
upload_date : str, optional
    The date-time when the dataset was uploaded, generated by server.
language : str, optional
    Language in which the data is represented.
    Starts with 1 upper case letter, rest lower case, e.g. 'English'.
licence : str, optional
    License of the data.
url : str, optional
    Valid URL, points to actual data file.
    The file can be on the OpenML server or another dataset repository.
default_target_attribute : str, optional
    The default target attribute, if it exists. Can have multiple values, comma separated.
row_id_attribute : str, optional
    The attribute that represents the row-id column, if present in the dataset.
ignore_attribute : str | list, optional
    Attributes that should be excluded in modelling, such as identifiers and indexes.
version_label : str, optional
    Version label provided by user. Can be a date, hash, or some other type of id.
citation : str, optional
    Reference(s) that should be cited when building on this data.
tag : str, optional
    Tags, describing the algorithms.
visibility : str, optional
    Who can see the dataset.
    Typical values: 'Everyone', 'All my friends', 'Only me'. Can also be any of the user's circles.
original_data_url : str, optional
    For derived data, the url to the original dataset.
paper_url : str, optional
    Link to a paper describing the dataset.
update_comment : str, optional
    An explanation for when the dataset is uploaded.
md5_checksum : str, optional
    MD5 checksum to check if the dataset is downloaded without corruption.
data_file : str, optional
    Path to where the dataset is located.
features_file : dict, optional
    A dictionary of dataset features, which maps a feature index to an OpenMLDataFeature.
qualities_file : dict, optional
    A dictionary of dataset qualities, which maps a quality name to a quality value.
dataset : string, optional
    Serialized ARFF dataset string.
parquet_url : string, optional
    The URL of the storage location where the dataset files are hosted, e.g. a MinIO bucket URL.
    If specified, the data will be accessed from this URL when reading the files.
parquet_file : string, optional
    Path to the local file.

Source code in openml/datasets/dataset.py
def __init__(  # noqa: C901, PLR0912, PLR0913, PLR0915
    self,
    name: str,
    description: str | None,
    data_format: Literal["arff", "sparse_arff"] = "arff",
    cache_format: Literal["feather", "pickle"] = "pickle",
    dataset_id: int | None = None,
    version: int | None = None,
    creator: str | None = None,
    contributor: str | None = None,
    collection_date: str | None = None,
    upload_date: str | None = None,
    language: str | None = None,
    licence: str | None = None,
    url: str | None = None,
    default_target_attribute: str | None = None,
    row_id_attribute: str | None = None,
    ignore_attribute: str | list[str] | None = None,
    version_label: str | None = None,
    citation: str | None = None,
    tag: str | None = None,
    visibility: str | None = None,
    original_data_url: str | None = None,
    paper_url: str | None = None,
    update_comment: str | None = None,
    md5_checksum: str | None = None,
    data_file: str | None = None,
    features_file: str | None = None,
    qualities_file: str | None = None,
    dataset: str | None = None,
    parquet_url: str | None = None,
    parquet_file: str | None = None,
):
    if cache_format not in ["feather", "pickle"]:
        raise ValueError(
            "cache_format must be one of 'feather' or 'pickle. "
            f"Invalid format specified: {cache_format}",
        )

    def find_invalid_characters(string: str, pattern: str) -> str:
        invalid_chars = set()
        regex = re.compile(pattern)
        for char in string:
            if not regex.match(char):
                invalid_chars.add(char)
        return ",".join(
            [f"'{char}'" if char != "'" else f'"{char}"' for char in invalid_chars],
        )

    if dataset_id is None:
        pattern = "^[\x00-\x7f]*$"
        if description and not re.match(pattern, description):
            # not basiclatin (XSD complains)
            invalid_characters = find_invalid_characters(description, pattern)
            raise ValueError(
                f"Invalid symbols {invalid_characters} in description: {description}",
            )
        pattern = "^[\x00-\x7f]*$"
        if citation and not re.match(pattern, citation):
            # not basiclatin (XSD complains)
            invalid_characters = find_invalid_characters(citation, pattern)
            raise ValueError(
                f"Invalid symbols {invalid_characters} in citation: {citation}",
            )
        pattern = "^[a-zA-Z0-9_\\-\\.\\(\\),]+$"
        if not re.match(pattern, name):
            # regex given by server in error message
            invalid_characters = find_invalid_characters(name, pattern)
            raise ValueError(f"Invalid symbols {invalid_characters} in name: {name}")

    self.ignore_attribute: list[str] | None = None
    if isinstance(ignore_attribute, str):
        self.ignore_attribute = [ignore_attribute]
    elif isinstance(ignore_attribute, list) or ignore_attribute is None:
        self.ignore_attribute = ignore_attribute
    else:
        raise ValueError("Wrong data type for ignore_attribute. Should be list.")

    # TODO add function to check if the name is casual_string128
    # Attributes received by querying the RESTful API
    self.dataset_id = int(dataset_id) if dataset_id is not None else None
    self.name = name
    self.version = int(version) if version is not None else None
    self.description = description
    self.cache_format = cache_format
    # Has to be called format, otherwise there will be an XML upload error
    self.format = data_format
    self.creator = creator
    self.contributor = contributor
    self.collection_date = collection_date
    self.upload_date = upload_date
    self.language = language
    self.licence = licence
    self.url = url
    self.default_target_attribute = default_target_attribute
    self.row_id_attribute = row_id_attribute

    self.version_label = version_label
    self.citation = citation
    self.tag = tag
    self.visibility = visibility
    self.original_data_url = original_data_url
    self.paper_url = paper_url
    self.update_comment = update_comment
    self.md5_checksum = md5_checksum
    self.data_file = data_file
    self.parquet_file = parquet_file
    self._dataset = dataset
    self._parquet_url = parquet_url

    self._features: dict[int, OpenMLDataFeature] | None = None
    self._qualities: dict[str, float] | None = None
    self._no_qualities_found = False

    if features_file is not None:
        self._features = _read_features(Path(features_file))

    # "" was the old default value by `get_dataset` and maybe still used by some
    if qualities_file == "":
        # TODO(0.15): to switch to "qualities_file is not None" below and remove warning
        warnings.warn(
            "Starting from Version 0.15 `qualities_file` must be None and not an empty string "
            "to avoid reading the qualities from file. Set `qualities_file` to None to avoid "
            "this warning.",
            FutureWarning,
            stacklevel=2,
        )
        qualities_file = None

    if qualities_file is not None:
        self._qualities = _read_qualities(Path(qualities_file))

    if data_file is not None:
        data_pickle, data_feather, feather_attribute = self._compressed_cache_file_paths(
            Path(data_file)
        )
        self.data_pickle_file = data_pickle if Path(data_pickle).exists() else None
        self.data_feather_file = data_feather if Path(data_feather).exists() else None
        self.feather_attribute_file = feather_attribute if Path(feather_attribute).exists() else None
    else:
        self.data_pickle_file = None
        self.data_feather_file = None
        self.feather_attribute_file = None
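
In practice an OpenMLDataset is rarely constructed by hand; it is usually obtained from the server (see get_dataset below) or built via create_dataset. A small sketch of inspecting the metadata attributes set in this constructor, assuming dataset 61 ('iris') is available on the public OpenML server:

import openml

# Only the dataset description is downloaded here, not the data file.
dataset = openml.datasets.get_dataset(61)
print(dataset.name, dataset.version, dataset.licence)
print(dataset.default_target_attribute)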

features property #

features: dict[int, OpenMLDataFeature]

Get the features of this dataset.

id property #

id: int | None

Get the dataset numeric id.

openml_url property #

openml_url: str | None

The URL of the object on the server, if it was uploaded, else None.

qualities property #

qualities: dict[str, float] | None

Get the qualities of this dataset.

get_data #

get_data(target: list[str] | str | None = None, include_row_id: bool = False, include_ignore_attribute: bool = False) -> tuple[DataFrame, Series | None, list[bool], list[str]]

Returns dataset content as dataframes.

Parameters#

target : string, List[str] or None (default=None)
    Name of target column to separate from the data.
    Splitting multiple columns is currently not supported.
include_row_id : boolean (default=False)
    Whether to include row ids in the returned dataset.
include_ignore_attribute : boolean (default=False)
    Whether to include columns that are marked as "ignore" on the server in the dataset.

Returns#

X : dataframe, shape (n_samples, n_columns)
    Dataset, may have sparse dtypes in the columns if required.
y : pd.Series, shape (n_samples, ) or None
    Target column.
categorical_indicator : list[bool]
    Mask that indicates categorical features.
attribute_names : list[str]
    List of attribute names.

Source code in openml/datasets/dataset.py
def get_data(  # noqa: C901
    self,
    target: list[str] | str | None = None,
    include_row_id: bool = False,  # noqa: FBT001, FBT002
    include_ignore_attribute: bool = False,  # noqa: FBT001, FBT002
) -> tuple[pd.DataFrame, pd.Series | None, list[bool], list[str]]:
    """Returns dataset content as dataframes.

    Parameters
    ----------
    target : string, List[str] or None (default=None)
        Name of target column to separate from the data.
        Splitting multiple columns is currently not supported.
    include_row_id : boolean (default=False)
        Whether to include row ids in the returned dataset.
    include_ignore_attribute : boolean (default=False)
        Whether to include columns that are marked as "ignore"
        on the server in the dataset.


    Returns
    -------
    X : dataframe, shape (n_samples, n_columns)
        Dataset, may have sparse dtypes in the columns if required.
    y : pd.Series, shape (n_samples, ) or None
        Target column
    categorical_indicator : list[bool]
        Mask that indicate categorical features.
    attribute_names : list[str]
        List of attribute names.
    """
    data, categorical_mask, attribute_names = self._load_data()

    to_exclude = []
    if not include_row_id and self.row_id_attribute is not None:
        if isinstance(self.row_id_attribute, str):
            to_exclude.append(self.row_id_attribute)
        elif isinstance(self.row_id_attribute, Iterable):
            to_exclude.extend(self.row_id_attribute)

    if not include_ignore_attribute and self.ignore_attribute is not None:
        if isinstance(self.ignore_attribute, str):
            to_exclude.append(self.ignore_attribute)
        elif isinstance(self.ignore_attribute, Iterable):
            to_exclude.extend(self.ignore_attribute)

    if len(to_exclude) > 0:
        logger.info(f"Going to remove the following attributes: {to_exclude}")
        keep = np.array([column not in to_exclude for column in attribute_names])
        data = data.drop(columns=to_exclude)
        categorical_mask = [cat for cat, k in zip(categorical_mask, keep) if k]
        attribute_names = [att for att, k in zip(attribute_names, keep) if k]

    if target is None:
        return data, None, categorical_mask, attribute_names

    if isinstance(target, str):
        target_names = target.split(",") if "," in target else [target]
    else:
        target_names = target

    # All the assumptions below for the target are dependent on the number of targets being 1
    n_targets = len(target_names)
    if n_targets > 1:
        raise NotImplementedError(f"Number of targets {n_targets} not implemented.")

    target_name = target_names[0]
    x = data.drop(columns=[target_name])
    y = data[target_name].squeeze()

    # Finally, remove the target from the list of attributes and categorical mask
    target_index = attribute_names.index(target_name)
    categorical_mask.pop(target_index)
    attribute_names.remove(target_name)

    assert isinstance(y, pd.Series)
    return x, y, categorical_mask, attribute_names
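
A short usage sketch of separating a target column, assuming dataset 61 ('iris') is available and has a default target attribute set:

import openml

dataset = openml.datasets.get_dataset(61)
X, y, categorical_indicator, attribute_names = dataset.get_data(
    target=dataset.default_target_attribute,
)
print(X.shape, y.shape)        # features as a DataFrame, target as a Series
print(attribute_names)         # column names, without the target
print(categorical_indicator)   # True for nominal columns, aligned with attribute_names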

get_features_by_type #

get_features_by_type(data_type: str, exclude: list[str] | None = None, exclude_ignore_attribute: bool = True, exclude_row_id_attribute: bool = True) -> list[int]

Return indices of features of a given type, e.g. all nominal features. Optional parameters to exclude various features by index or ontology.

Parameters#

data_type : str
    The data type to return (e.g., nominal, numeric, date, string).
exclude : list(int)
    List of columns to exclude from the return value.
exclude_ignore_attribute : bool
    Whether to exclude the defined ignore attributes (and adapt the return values
    as if these indices are not present).
exclude_row_id_attribute : bool
    Whether to exclude the defined row id attributes (and adapt the return values
    as if these indices are not present).

Returns#

result : list
    A list of indices that have the specified data type.

Source code in openml/datasets/dataset.py
def get_features_by_type(  # noqa: C901
    self,
    data_type: str,
    exclude: list[str] | None = None,
    exclude_ignore_attribute: bool = True,  # noqa: FBT002, FBT001
    exclude_row_id_attribute: bool = True,  # noqa: FBT002, FBT001
) -> list[int]:
    """
    Return indices of features of a given type, e.g. all nominal features.
    Optional parameters to exclude various features by index or ontology.

    Parameters
    ----------
    data_type : str
        The data type to return (e.g., nominal, numeric, date, string)
    exclude : list(int)
        List of columns to exclude from the return value
    exclude_ignore_attribute : bool
        Whether to exclude the defined ignore attributes (and adapt the
        return values as if these indices are not present)
    exclude_row_id_attribute : bool
        Whether to exclude the defined row id attributes (and adapt the
        return values as if these indices are not present)

    Returns
    -------
    result : list
        a list of indices that have the specified data type
    """
    if data_type not in OpenMLDataFeature.LEGAL_DATA_TYPES:
        raise TypeError("Illegal feature type requested")
    if self.ignore_attribute is not None and not isinstance(self.ignore_attribute, list):
        raise TypeError("ignore_attribute should be a list")
    if self.row_id_attribute is not None and not isinstance(self.row_id_attribute, str):
        raise TypeError("row id attribute should be a str")
    if exclude is not None and not isinstance(exclude, list):
        raise TypeError("Exclude should be a list")
        # assert all(isinstance(elem, str) for elem in exclude),
        #            "Exclude should be a list of strings"
    to_exclude = []
    if exclude is not None:
        to_exclude.extend(exclude)
    if exclude_ignore_attribute and self.ignore_attribute is not None:
        to_exclude.extend(self.ignore_attribute)
    if exclude_row_id_attribute and self.row_id_attribute is not None:
        to_exclude.append(self.row_id_attribute)

    result = []
    offset = 0
    # this function assumes that everything in to_exclude will
    # be 'excluded' from the dataset (hence the offset)
    for idx in self.features:
        name = self.features[idx].name
        if name in to_exclude:
            offset += 1
        elif self.features[idx].data_type == data_type:
            result.append(idx - offset)
    return result
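
For example, to collect the indices of all nominal and all numeric columns (a sketch, assuming dataset 61 is available on the public server):

import openml

dataset = openml.datasets.get_dataset(61, download_features_meta_data=True)
nominal_indices = dataset.get_features_by_type("nominal")
numeric_indices = dataset.get_features_by_type("numeric")
print(nominal_indices, numeric_indices)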

open_in_browser #

open_in_browser() -> None

Opens the OpenML web page corresponding to this object in your default browser.

Source code in openml/base.py
def open_in_browser(self) -> None:
    """Opens the OpenML web page corresponding to this object in your default browser."""
    if self.openml_url is None:
        raise ValueError(
            "Cannot open element on OpenML.org when attribute `openml_url` is `None`",
        )

    webbrowser.open(self.openml_url)

publish #

publish() -> OpenMLBase

Publish the object on the OpenML server.

Source code in openml/base.py
def publish(self) -> OpenMLBase:
    """Publish the object on the OpenML server."""
    file_elements = self._get_file_elements()

    if "description" not in file_elements:
        file_elements["description"] = self._to_xml()

    call = f"{_get_rest_api_type_alias(self)}/"
    response_text = openml._api_calls._perform_api_call(
        call,
        "post",
        file_elements=file_elements,
    )
    xml_response = xmltodict.parse(response_text)

    self._parse_publish_response(xml_response)
    return self

push_tag #

push_tag(tag: str) -> None

Annotates this entity with a tag on the server.

Parameters#

tag : str
    Tag to attach to the flow.

Source code in openml/base.py
def push_tag(self, tag: str) -> None:
    """Annotates this entity with a tag on the server.

    Parameters
    ----------
    tag : str
        Tag to attach to the flow.
    """
    _tag_openml_base(self, tag)

remove_tag #

remove_tag(tag: str) -> None

Removes a tag from this entity on the server.

Parameters#

tag : str
    Tag to attach to the flow.

Source code in openml/base.py
def remove_tag(self, tag: str) -> None:
    """Removes a tag from this entity on the server.

    Parameters
    ----------
    tag : str
        Tag to attach to the flow.
    """
    _tag_openml_base(self, tag, untag=True)

retrieve_class_labels #

retrieve_class_labels(target_name: str = 'class') -> None | list[str]

Reads the dataset's ARFF to determine the class labels.

If the task has no class labels (for example, a regression problem), it returns None. This is necessary because the data returned by get_data only contains the indices of the classes, while OpenML needs the actual class name when uploading the results of a run.

Parameters#

target_name : str
    Name of the target attribute.

Returns#

list of str, or None if the target has no class labels.

Source code in openml/datasets/dataset.py
def retrieve_class_labels(self, target_name: str = "class") -> None | list[str]:
    """Reads the datasets arff to determine the class-labels.

    If the task has no class labels (for example a regression problem)
    it returns None. Necessary because the data returned by get_data
    only contains the indices of the classes, while OpenML needs the real
    classname when uploading the results of a run.

    Parameters
    ----------
    target_name : str
        Name of the target attribute

    Returns
    -------
    list
    """
    for feature in self.features.values():
        if feature.name == target_name:
            if feature.data_type == "nominal":
                return feature.nominal_values

            if feature.data_type == "string":
                # Rel.: #1311
                # The target is invalid for a classification task if the feature type is string
            # and not nominal. For such misconfigured tasks, we silently fix it here as
            # we can safely interpret string as nominal.
                df, *_ = self.get_data()
                return list(df[feature.name].unique())

    return None
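
A sketch of looking up class labels for the default target of a classification dataset such as 'iris' (id 61 on the public server):

import openml

dataset = openml.datasets.get_dataset(61)
labels = dataset.retrieve_class_labels(target_name=dataset.default_target_attribute)
print(labels)  # list of class names, or None for non-nominal targets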

url_for_id classmethod #

url_for_id(id_: int) -> str

Return the OpenML URL for the object of the class entity with the given id.

Source code in openml/base.py
@classmethod
def url_for_id(cls, id_: int) -> str:
    """Return the OpenML URL for the object of the class entity with the given id."""
    # Sample url for a flow: openml.org/f/123
    return f"{openml.config.get_server_base_url()}/{cls._entity_letter()}/{id_}"

attributes_arff_from_df #

attributes_arff_from_df(df: DataFrame) -> list[tuple[str, list[str] | str]]

Describe attributes of the dataframe according to ARFF specification.

Parameters#

df : DataFrame, shape (n_samples, n_features)
    The dataframe containing the data set.

Returns#

attributes_arff : list[str]
    The data set attributes as required by the ARFF format.

Source code in openml/datasets/functions.py
def attributes_arff_from_df(df: pd.DataFrame) -> list[tuple[str, list[str] | str]]:
    """Describe attributes of the dataframe according to ARFF specification.

    Parameters
    ----------
    df : DataFrame, shape (n_samples, n_features)
        The dataframe containing the data set.

    Returns
    -------
    attributes_arff : list[str]
        The data set attributes as required by the ARFF format.
    """
    PD_DTYPES_TO_ARFF_DTYPE = {"integer": "INTEGER", "floating": "REAL", "string": "STRING"}
    attributes_arff: list[tuple[str, list[str] | str]] = []

    if not all(isinstance(column_name, str) for column_name in df.columns):
        logger.warning("Converting non-str column names to str.")
        df.columns = [str(column_name) for column_name in df.columns]

    for column_name in df:
        # skipna=True does not infer properly the dtype. The NA values are
        # dropped before the inference instead.
        column_dtype = pd.api.types.infer_dtype(df[column_name].dropna(), skipna=False)

        if column_dtype == "categorical":
            # for categorical feature, arff expects a list string. However, a
            # categorical column can contain mixed type and should therefore
            # raise an error asking to convert all entries to string.
            categories = df[column_name].cat.categories
            categories_dtype = pd.api.types.infer_dtype(categories)
            if categories_dtype not in ("string", "unicode"):
                raise ValueError(
                    f"The column '{column_name}' of the dataframe is of "
                    "'category' dtype. Therefore, all values in "
                    "this columns should be string. Please "
                    "convert the entries which are not string. "
                    f"Got {categories_dtype} dtype in this column.",
                )
            attributes_arff.append((column_name, categories.tolist()))
        elif column_dtype == "boolean":
            # boolean are encoded as categorical.
            attributes_arff.append((column_name, ["True", "False"]))
        elif column_dtype in PD_DTYPES_TO_ARFF_DTYPE:
            attributes_arff.append((column_name, PD_DTYPES_TO_ARFF_DTYPE[column_dtype]))
        else:
            raise ValueError(
                f"The dtype '{column_dtype}' of the column '{column_name}' is not "
                "currently supported by liac-arff. Supported "
                "dtypes are categorical, string, integer, "
                "floating, and boolean.",
            )
    return attributes_arff
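
A minimal sketch of inferring ARFF attributes from a small pandas DataFrame; the column names and values are made up:

import pandas as pd
from openml.datasets import attributes_arff_from_df

df = pd.DataFrame(
    {
        "age": [23, 31, 47],
        "height": [1.71, 1.80, 1.65],
        "city": pd.Series(["Berlin", "Paris", "Berlin"], dtype="category"),
        "subscribed": [True, False, True],
    }
)
# Expected shape of the result: integer -> 'INTEGER', float -> 'REAL',
# categorical -> list of category strings, boolean -> ['True', 'False'].
print(attributes_arff_from_df(df))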

check_datasets_active #

check_datasets_active(dataset_ids: list[int], raise_error_if_not_exist: bool = True) -> dict[int, bool]

Check if the dataset ids provided are active.

Raises an error if a dataset_id in the given list of dataset_ids does not exist on the server and raise_error_if_not_exist is set to True (default).

Parameters#

dataset_ids : List[int]
    A list of integers representing dataset ids.
raise_error_if_not_exist : bool (default=True)
    Flag that if activated can raise an error, if one or more of the given dataset ids
    do not exist on the server.

Returns#

dict
    A dictionary with items {did: bool}.

Source code in openml/datasets/functions.py
def check_datasets_active(
    dataset_ids: list[int],
    raise_error_if_not_exist: bool = True,  # noqa: FBT001, FBT002
) -> dict[int, bool]:
    """
    Check if the dataset ids provided are active.

    Raises an error if a dataset_id in the given list
    of dataset_ids does not exist on the server and
    `raise_error_if_not_exist` is set to True (default).

    Parameters
    ----------
    dataset_ids : List[int]
        A list of integers representing dataset ids.
    raise_error_if_not_exist : bool (default=True)
        Flag that if activated can raise an error, if one or more of the
        given dataset ids do not exist on the server.

    Returns
    -------
    dict
        A dictionary with items {did: bool}
    """
    datasets = list_datasets(status="all", data_id=dataset_ids)
    missing = set(dataset_ids) - set(datasets.index)
    if raise_error_if_not_exist and missing:
        missing_str = ", ".join(str(did) for did in missing)
        raise ValueError(f"Could not find dataset(s) {missing_str} in OpenML dataset list.")
    mask = datasets["status"] == "active"
    return dict(mask)
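
A usage sketch; the dataset ids below are arbitrary, and raise_error_if_not_exist is disabled so that missing ids simply drop out of the result:

import openml

status = openml.datasets.check_datasets_active([2, 61], raise_error_if_not_exist=False)
print(status)  # e.g. {2: True, 61: True}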

create_dataset #

create_dataset(name: str, description: str | None, creator: str | None, contributor: str | None, collection_date: str | None, language: str | None, licence: str | None, attributes: list[tuple[str, str | list[str]]] | dict[str, str | list[str]] | Literal['auto'], data: DataFrame | ndarray | coo_matrix, default_target_attribute: str, ignore_attribute: str | list[str] | None, citation: str, row_id_attribute: str | None = None, original_data_url: str | None = None, paper_url: str | None = None, update_comment: str | None = None, version_label: str | None = None) -> OpenMLDataset

Create a dataset.

This function creates an OpenMLDataset object. The OpenMLDataset object contains information related to the dataset and the actual data file.

Parameters#

name : str
    Name of the dataset.
description : str
    Description of the dataset.
creator : str
    The person who created the dataset.
contributor : str
    People who contributed to the current version of the dataset.
collection_date : str
    The date the data was originally collected, given by the uploader.
language : str
    Language in which the data is represented.
    Starts with 1 upper case letter, rest lower case, e.g. 'English'.
licence : str
    License of the data.
attributes : list, dict, or 'auto'
    A list of tuples. Each tuple consists of the attribute name and type.
    If passing a pandas DataFrame, the attributes can be automatically inferred
    by passing 'auto'. Specific attributes can be manually specified by passing
    a dictionary where the key is the name of the attribute and the value is the
    data type of the attribute.
data : ndarray, list, dataframe, coo_matrix, shape (n_samples, n_features)
    An array that contains both the attributes and the targets. When providing
    a dataframe, the attribute names and type can be inferred by passing
    attributes='auto'. The target feature is indicated as meta-data of the dataset.
default_target_attribute : str
    The default target attribute, if it exists. Can have multiple values, comma separated.
ignore_attribute : str | list
    Attributes that should be excluded in modelling, such as identifiers and indexes.
    Can have multiple values, comma separated.
citation : str
    Reference(s) that should be cited when building on this data.
version_label : str, optional
    Version label provided by user. Can be a date, hash, or some other type of id.
row_id_attribute : str, optional
    The attribute that represents the row-id column, if present in the dataset.
    If data is a dataframe and row_id_attribute is not specified, the index of the
    dataframe will be used as the row_id_attribute. If the name of the index is None,
    it will be discarded.

    .. versionadded: 0.8
        Inference of ``row_id_attribute`` from a dataframe.
original_data_url : str, optional
    For derived data, the url to the original dataset.
paper_url : str, optional
    Link to a paper describing the dataset.
update_comment : str, optional
    An explanation for when the dataset is uploaded.

Returns#

class:openml.OpenMLDataset
    Dataset description.

Source code in openml/datasets/functions.py
def create_dataset(  # noqa: C901, PLR0912, PLR0915
    name: str,
    description: str | None,
    creator: str | None,
    contributor: str | None,
    collection_date: str | None,
    language: str | None,
    licence: str | None,
    # TODO(eddiebergman): Docstring says `type` but I don't know what this is other than strings
    # Edit: Found it could also be like ["True", "False"]
    attributes: list[tuple[str, str | list[str]]] | dict[str, str | list[str]] | Literal["auto"],
    data: pd.DataFrame | np.ndarray | scipy.sparse.coo_matrix,
    # TODO(eddiebergman): Function requires `default_target_attribute` exist but API allows None
    default_target_attribute: str,
    ignore_attribute: str | list[str] | None,
    citation: str,
    row_id_attribute: str | None = None,
    original_data_url: str | None = None,
    paper_url: str | None = None,
    update_comment: str | None = None,
    version_label: str | None = None,
) -> OpenMLDataset:
    """Create a dataset.

    This function creates an OpenMLDataset object.
    The OpenMLDataset object contains information related to the dataset
    and the actual data file.

    Parameters
    ----------
    name : str
        Name of the dataset.
    description : str
        Description of the dataset.
    creator : str
        The person who created the dataset.
    contributor : str
        People who contributed to the current version of the dataset.
    collection_date : str
        The date the data was originally collected, given by the uploader.
    language : str
        Language in which the data is represented.
        Starts with 1 upper case letter, rest lower case, e.g. 'English'.
    licence : str
        License of the data.
    attributes : list, dict, or 'auto'
        A list of tuples. Each tuple consists of the attribute name and type.
        If passing a pandas DataFrame, the attributes can be automatically
        inferred by passing ``'auto'``. Specific attributes can be manually
        specified by a passing a dictionary where the key is the name of the
        attribute and the value is the data type of the attribute.
    data : ndarray, list, dataframe, coo_matrix, shape (n_samples, n_features)
        An array that contains both the attributes and the targets. When
        providing a dataframe, the attribute names and type can be inferred by
        passing ``attributes='auto'``.
        The target feature is indicated as meta-data of the dataset.
    default_target_attribute : str
        The default target attribute, if it exists.
        Can have multiple values, comma separated.
    ignore_attribute : str | list
        Attributes that should be excluded in modelling,
        such as identifiers and indexes.
        Can have multiple values, comma separated.
    citation : str
        Reference(s) that should be cited when building on this data.
    version_label : str, optional
        Version label provided by user.
         Can be a date, hash, or some other type of id.
    row_id_attribute : str, optional
        The attribute that represents the row-id column, if present in the
        dataset. If ``data`` is a dataframe and ``row_id_attribute`` is not
        specified, the index of the dataframe will be used as the
        ``row_id_attribute``. If the name of the index is ``None``, it will
        be discarded.

        .. versionadded: 0.8
            Inference of ``row_id_attribute`` from a dataframe.
    original_data_url : str, optional
        For derived data, the url to the original dataset.
    paper_url : str, optional
        Link to a paper describing the dataset.
    update_comment : str, optional
        An explanation for when the dataset is uploaded.

    Returns
    -------
    class:`openml.OpenMLDataset`
    Dataset description.
    """
    if isinstance(data, pd.DataFrame):
        # infer the row id from the index of the dataset
        if row_id_attribute is None:
            row_id_attribute = data.index.name
        # When calling data.values, the index will be skipped.
        # We need to reset the index such that it is part of the data.
        if data.index.name is not None:
            data = data.reset_index()

    if attributes == "auto" or isinstance(attributes, dict):
        if not isinstance(data, pd.DataFrame):
            raise ValueError(
                "Automatically inferring attributes requires "
                f"a pandas DataFrame. A {data!r} was given instead.",
            )
        # infer the type of data for each column of the DataFrame
        attributes_ = attributes_arff_from_df(data)
        if isinstance(attributes, dict):
            # override the attributes which was specified by the user
            for attr_idx in range(len(attributes_)):
                attr_name = attributes_[attr_idx][0]
                if attr_name in attributes:
                    attributes_[attr_idx] = (attr_name, attributes[attr_name])
    else:
        attributes_ = attributes
    ignore_attributes = _expand_parameter(ignore_attribute)
    _validated_data_attributes(ignore_attributes, attributes_, "ignore_attribute")

    default_target_attributes = _expand_parameter(default_target_attribute)
    _validated_data_attributes(default_target_attributes, attributes_, "default_target_attribute")

    if row_id_attribute is not None:
        is_row_id_an_attribute = any(attr[0] == row_id_attribute for attr in attributes_)
        if not is_row_id_an_attribute:
            raise ValueError(
                "'row_id_attribute' should be one of the data attribute. "
                f" Got '{row_id_attribute}' while candidates are"
                f" {[attr[0] for attr in attributes_]}.",
            )

    if isinstance(data, pd.DataFrame):
        if all(isinstance(dtype, pd.SparseDtype) for dtype in data.dtypes):
            data = data.sparse.to_coo()
            # liac-arff only support COO matrices with sorted rows
            row_idx_sorted = np.argsort(data.row)  # type: ignore
            data.row = data.row[row_idx_sorted]  # type: ignore
            data.col = data.col[row_idx_sorted]  # type: ignore
            data.data = data.data[row_idx_sorted]  # type: ignore
        else:
            data = data.to_numpy()

    data_format: Literal["arff", "sparse_arff"]
    if isinstance(data, (list, np.ndarray)):
        if isinstance(data[0], (list, np.ndarray)):
            data_format = "arff"
        elif isinstance(data[0], dict):
            data_format = "sparse_arff"
        else:
            raise ValueError(
                "When giving a list or a numpy.ndarray, "
                "they should contain a list/ numpy.ndarray "
                "for dense data or a dictionary for sparse "
                f"data. Got {data[0]!r} instead.",
            )
    elif isinstance(data, coo_matrix):
        data_format = "sparse_arff"
    else:
        raise ValueError(
            "When giving a list or a numpy.ndarray, "
            "they should contain a list/ numpy.ndarray "
            "for dense data or a dictionary for sparse "
            f"data. Got {data[0]!r} instead.",
        )

    arff_object = {
        "relation": name,
        "description": description,
        "attributes": attributes_,
        "data": data,
    }

    # serializes the ARFF dataset object and returns a string
    arff_dataset = arff.dumps(arff_object)
    try:
        # check if ARFF is valid
        decoder = arff.ArffDecoder()
        return_type = arff.COO if data_format == "sparse_arff" else arff.DENSE
        decoder.decode(arff_dataset, encode_nominal=True, return_type=return_type)
    except arff.ArffException as e:
        raise ValueError(
            "The arguments you have provided do not construct a valid ARFF file"
        ) from e

    return OpenMLDataset(
        name=name,
        description=description,
        data_format=data_format,
        creator=creator,
        contributor=contributor,
        collection_date=collection_date,
        language=language,
        licence=licence,
        default_target_attribute=default_target_attribute,
        row_id_attribute=row_id_attribute,
        ignore_attribute=ignore_attribute,
        citation=citation,
        version_label=version_label,
        original_data_url=original_data_url,
        paper_url=paper_url,
        update_comment=update_comment,
        dataset=arff_dataset,
    )
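
A hedged end-to-end sketch of creating and publishing a small dataset. Publishing uploads to the configured server and requires openml.config.apikey to be set; the dataset content below is invented:

import pandas as pd
import openml

df = pd.DataFrame(
    {
        "sepal_length": [5.1, 4.9, 6.3],
        "species": pd.Series(["setosa", "setosa", "virginica"], dtype="category"),
    }
)

dataset = openml.datasets.create_dataset(
    name="my_toy_dataset",
    description="A tiny example dataset.",
    creator="Jane Doe",
    contributor=None,
    collection_date="2024",
    language="English",
    licence="CC0",
    attributes="auto",             # infer the ARFF attributes from the dataframe
    data=df,
    default_target_attribute="species",
    ignore_attribute=None,
    citation="No citation.",
)
# dataset.publish()                # uncomment to upload; the server assigns an id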

delete_dataset #

delete_dataset(dataset_id: int) -> bool

Delete dataset with id dataset_id from the OpenML server.

This can only be done if you are the owner of the dataset and no tasks are attached to the dataset.

Parameters#

dataset_id : int
    OpenML id of the dataset.

Returns#

bool
    True if the deletion was successful. False otherwise.

Source code in openml/datasets/functions.py
def delete_dataset(dataset_id: int) -> bool:
    """Delete dataset with id `dataset_id` from the OpenML server.

    This can only be done if you are the owner of the dataset and
    no tasks are attached to the dataset.

    Parameters
    ----------
    dataset_id : int
        OpenML id of the dataset

    Returns
    -------
    bool
        True if the deletion was successful. False otherwise.
    """
    return openml.utils._delete_entity("data", dataset_id)

edit_dataset #

edit_dataset(data_id: int, description: str | None = None, creator: str | None = None, contributor: str | None = None, collection_date: str | None = None, language: str | None = None, default_target_attribute: str | None = None, ignore_attribute: str | list[str] | None = None, citation: str | None = None, row_id_attribute: str | None = None, original_data_url: str | None = None, paper_url: str | None = None) -> int

Edits an OpenMLDataset.

In addition to providing the dataset id of the dataset to edit (through data_id), you must specify a value for at least one of the optional function arguments, i.e. one value for a field to edit.

This function allows editing of both non-critical and critical fields. Critical fields are default_target_attribute, ignore_attribute, row_id_attribute.

  • Editing non-critical data fields is allowed for all authenticated users.
  • Editing critical fields is allowed only for the owner, provided there are no tasks associated with this dataset.

If the dataset has tasks or if the user is not the owner, the only way to edit critical fields is to use fork_dataset followed by edit_dataset.

Parameters#

data_id : int
    ID of the dataset.
description : str
    Description of the dataset.
creator : str
    The person who created the dataset.
contributor : str
    People who contributed to the current version of the dataset.
collection_date : str
    The date the data was originally collected, given by the uploader.
language : str
    Language in which the data is represented.
    Starts with 1 upper case letter, rest lower case, e.g. 'English'.
default_target_attribute : str
    The default target attribute, if it exists. Can have multiple values, comma separated.
ignore_attribute : str | list
    Attributes that should be excluded in modelling, such as identifiers and indexes.
citation : str
    Reference(s) that should be cited when building on this data.
row_id_attribute : str, optional
    The attribute that represents the row-id column, if present in the dataset.
    If data is a dataframe and row_id_attribute is not specified, the index of the
    dataframe will be used as the row_id_attribute. If the name of the index is None,
    it will be discarded.

    .. versionadded: 0.8
        Inference of ``row_id_attribute`` from a dataframe.
original_data_url : str, optional
    For derived data, the url to the original dataset.
paper_url : str, optional
    Link to a paper describing the dataset.

Returns#

Dataset id

Source code in openml/datasets/functions.py
def edit_dataset(
    data_id: int,
    description: str | None = None,
    creator: str | None = None,
    contributor: str | None = None,
    collection_date: str | None = None,
    language: str | None = None,
    default_target_attribute: str | None = None,
    ignore_attribute: str | list[str] | None = None,
    citation: str | None = None,
    row_id_attribute: str | None = None,
    original_data_url: str | None = None,
    paper_url: str | None = None,
) -> int:
    """Edits an OpenMLDataset.

    In addition to providing the dataset id of the dataset to edit (through data_id),
    you must specify a value for at least one of the optional function arguments,
    i.e. one value for a field to edit.

    This function allows editing of both non-critical and critical fields.
    Critical fields are default_target_attribute, ignore_attribute, row_id_attribute.

     - Editing non-critical data fields is allowed for all authenticated users.
     - Editing critical fields is allowed only for the owner, provided there are no tasks
       associated with this dataset.

    If dataset has tasks or if the user is not the owner, the only way
    to edit critical fields is to use fork_dataset followed by edit_dataset.

    Parameters
    ----------
    data_id : int
        ID of the dataset.
    description : str
        Description of the dataset.
    creator : str
        The person who created the dataset.
    contributor : str
        People who contributed to the current version of the dataset.
    collection_date : str
        The date the data was originally collected, given by the uploader.
    language : str
        Language in which the data is represented.
        Starts with 1 upper case letter, rest lower case, e.g. 'English'.
    default_target_attribute : str
        The default target attribute, if it exists.
        Can have multiple values, comma separated.
    ignore_attribute : str | list
        Attributes that should be excluded in modelling,
        such as identifiers and indexes.
    citation : str
        Reference(s) that should be cited when building on this data.
    row_id_attribute : str, optional
        The attribute that represents the row-id column, if present in the
        dataset. If ``data`` is a dataframe and ``row_id_attribute`` is not
        specified, the index of the dataframe will be used as the
        ``row_id_attribute``. If the name of the index is ``None``, it will
        be discarded.

        .. versionadded: 0.8
            Inference of ``row_id_attribute`` from a dataframe.
    original_data_url : str, optional
        For derived data, the url to the original dataset.
    paper_url : str, optional
        Link to a paper describing the dataset.

    Returns
    -------
    Dataset id
    """
    if not isinstance(data_id, int):
        raise TypeError(f"`data_id` must be of type `int`, not {type(data_id)}.")

    # compose data edit parameters as xml
    form_data = {"data_id": data_id}  # type: openml._api_calls.DATA_TYPE
    xml = OrderedDict()  # type: 'OrderedDict[str, OrderedDict]'
    xml["oml:data_edit_parameters"] = OrderedDict()
    xml["oml:data_edit_parameters"]["@xmlns:oml"] = "http://openml.org/openml"
    xml["oml:data_edit_parameters"]["oml:description"] = description
    xml["oml:data_edit_parameters"]["oml:creator"] = creator
    xml["oml:data_edit_parameters"]["oml:contributor"] = contributor
    xml["oml:data_edit_parameters"]["oml:collection_date"] = collection_date
    xml["oml:data_edit_parameters"]["oml:language"] = language
    xml["oml:data_edit_parameters"]["oml:default_target_attribute"] = default_target_attribute
    xml["oml:data_edit_parameters"]["oml:row_id_attribute"] = row_id_attribute
    xml["oml:data_edit_parameters"]["oml:ignore_attribute"] = ignore_attribute
    xml["oml:data_edit_parameters"]["oml:citation"] = citation
    xml["oml:data_edit_parameters"]["oml:original_data_url"] = original_data_url
    xml["oml:data_edit_parameters"]["oml:paper_url"] = paper_url

    # delete None inputs
    for k in list(xml["oml:data_edit_parameters"]):
        if not xml["oml:data_edit_parameters"][k]:
            del xml["oml:data_edit_parameters"][k]

    file_elements = {
        "edit_parameters": ("description.xml", xmltodict.unparse(xml)),
    }  # type: openml._api_calls.FILE_ELEMENTS_TYPE
    result_xml = openml._api_calls._perform_api_call(
        "data/edit",
        "post",
        data=form_data,
        file_elements=file_elements,
    )
    result = xmltodict.parse(result_xml)
    data_id = result["oml:data_edit"]["oml:id"]
    return int(data_id)
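
A sketch of editing a non-critical field. This requires an API key and a dataset you are allowed to edit; the dataset id below is hypothetical:

import openml

# openml.config.apikey = "..."     # authentication is required for edits
updated_id = openml.datasets.edit_dataset(
    data_id=128,                   # hypothetical id of a dataset you may edit
    description="Updated description with more detail on data collection.",
)
print(updated_id)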

fork_dataset #

fork_dataset(data_id: int) -> int

Creates a new dataset version, with the authenticated user as the new owner. The forked dataset can have distinct dataset meta-data, but the actual data itself is shared with the original version.

This API is intended for use when a user is unable to edit the critical fields of a dataset through the edit_dataset API. (Critical fields are default_target_attribute, ignore_attribute, row_id_attribute.)

Specifically, this happens when the user:

  1. Is not the owner of the dataset.
  2. Is the owner of the dataset, but the dataset has tasks.

In these two cases, the only way to edit critical fields is:

  1. Fork the dataset using the fork_dataset API.
  2. Call the edit_dataset API on the forked version.

Parameters#

data_id : int
    Id of the dataset to be forked.

Returns#

Dataset id of the forked dataset

Source code in openml/datasets/functions.py
def fork_dataset(data_id: int) -> int:
    """
     Creates a new dataset version, with the authenticated user as the new owner.
     The forked dataset can have distinct dataset meta-data,
     but the actual data itself is shared with the original version.

     This API is intended for use when a user is unable to edit the critical fields of a dataset
     through the edit_dataset API.
     (Critical fields are default_target_attribute, ignore_attribute, row_id_attribute.)

     Specifically, this happens when the user is:
            1. Not the owner of the dataset.
            2. User is the owner of the dataset, but the dataset has tasks.

     In these two cases the only way to edit critical fields is:
            1. STEP 1: Fork the dataset using fork_dataset API
            2. STEP 2: Call edit_dataset API on the forked version.


    Parameters
    ----------
    data_id : int
        id of the dataset to be forked

    Returns
    -------
    Dataset id of the forked dataset

    """
    if not isinstance(data_id, int):
        raise TypeError(f"`data_id` must be of type `int`, not {type(data_id)}.")
    # compose data fork parameters
    form_data = {"data_id": data_id}  # type: openml._api_calls.DATA_TYPE
    result_xml = openml._api_calls._perform_api_call("data/fork", "post", data=form_data)
    result = xmltodict.parse(result_xml)
    data_id = result["oml:data_fork"]["oml:id"]
    return int(data_id)
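
A sketch of the fork-then-edit workflow described above; an API key must be configured and the dataset id is hypothetical:

import openml

# openml.config.apikey = "..."     # authentication is required
forked_id = openml.datasets.fork_dataset(128)      # hypothetical dataset id
openml.datasets.edit_dataset(
    forked_id,
    default_target_attribute="class",              # critical field, editable on your own fork
)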

get_dataset #

get_dataset(dataset_id: int | str, download_data: bool = False, version: int | None = None, error_if_multiple: bool = False, cache_format: Literal['pickle', 'feather'] = 'pickle', download_qualities: bool = False, download_features_meta_data: bool = False, download_all_files: bool = False, force_refresh_cache: bool = False) -> OpenMLDataset

Download the OpenML dataset representation, optionally also download actual data file.

This function is by default NOT thread/multiprocessing safe, as it uses caching. A check is performed to determine whether the information has previously been downloaded to a cache, and if so, it is loaded from disk instead of retrieved from the server.

To make this function thread safe, you can install the python package oslo.concurrency. If oslo.concurrency is installed get_dataset becomes thread safe.

Alternatively, to make this function thread/multiprocessing safe initialize the cache first by calling get_dataset(args) once before calling get_dataset(args) many times in parallel. This will initialize the cache and later calls will use the cache in a thread/multiprocessing safe way.

If the dataset is retrieved by name, a version may be specified. If no version is specified and multiple versions of the dataset exist, the earliest version of the dataset that is still active will be returned. If no version is specified, multiple versions of the dataset exist, and error_if_multiple is set to True, this function will raise an exception.

Parameters#

dataset_id : int or str
    Dataset ID (integer) or dataset name (string) of the dataset to download.
download_data : bool (default=False)
    If True, also download the data file. Beware that some datasets are large and it might
    make the operation noticeably slower. Metadata is also still retrieved.
    If False, create the OpenMLDataset and only populate it with the metadata.
    The data may later be retrieved through the OpenMLDataset.get_data method.
version : int, optional (default=None)
    Specifies the version if dataset_id is specified by name.
    If no version is specified, retrieve the least recent still active version.
error_if_multiple : bool (default=False)
    If True, raise an error if multiple datasets are found with matching criteria.
cache_format : str (default='pickle') in {'pickle', 'feather'}
    Format for caching the dataset - may be feather or pickle.
    Note that the default 'pickle' option may load slower than feather when the number
    of rows is very high.
download_qualities : bool (default=False)
    Option to download 'qualities' meta-data in addition to the minimal dataset description.
    If True, download and cache the qualities file.
    If False, create the OpenMLDataset without qualities metadata. The data may later be
    added to the OpenMLDataset through the OpenMLDataset.load_metadata(qualities=True) method.
download_features_meta_data : bool (default=False)
    Option to download 'features' meta-data in addition to the minimal dataset description.
    If True, download and cache the features file.
    If False, create the OpenMLDataset without features metadata. The data may later be
    added to the OpenMLDataset through the OpenMLDataset.load_metadata(features=True) method.
download_all_files : bool (default=False)
    EXPERIMENTAL. Download all files related to the dataset that reside on the server.
    Useful for datasets which refer to auxiliary files (e.g., meta-album).
force_refresh_cache : bool (default=False)
    Force the cache to be refreshed by deleting the cache directory and re-downloading the data.
    Note that if force_refresh_cache is True, get_dataset is NOT thread/multiprocessing safe,
    because this creates a race condition around creating and deleting the cache, as with the
    cache in general.

Returns#

dataset : :class:openml.OpenMLDataset
    The downloaded dataset.
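
A usage sketch, assuming dataset 61 ('iris') exists on the public server; only the description is fetched up front and the data file is downloaded lazily when requested:

import openml

dataset = openml.datasets.get_dataset(61, download_qualities=True)
X, y, _, _ = dataset.get_data(target=dataset.default_target_attribute)
print(dataset.name, X.shape)

# Datasets can also be requested by name; version pins a specific upload.
iris_v1 = openml.datasets.get_dataset("iris", version=1)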

Source code in openml/datasets/functions.py
@openml.utils.thread_safe_if_oslo_installed
def get_dataset(  # noqa: C901, PLR0912
    dataset_id: int | str,
    download_data: bool = False,  # noqa: FBT002, FBT001
    version: int | None = None,
    error_if_multiple: bool = False,  # noqa: FBT002, FBT001
    cache_format: Literal["pickle", "feather"] = "pickle",
    download_qualities: bool = False,  # noqa: FBT002, FBT001
    download_features_meta_data: bool = False,  # noqa: FBT002, FBT001
    download_all_files: bool = False,  # noqa: FBT002, FBT001
    force_refresh_cache: bool = False,  # noqa: FBT001, FBT002
) -> OpenMLDataset:
    """Download the OpenML dataset representation, optionally also download actual data file.

    This function is by default NOT thread/multiprocessing safe, as this function uses caching.
    A check will be performed to determine if the information has previously been downloaded to a
    cache, and if so be loaded from disk instead of retrieved from the server.

    To make this function thread safe, you can install the python package ``oslo.concurrency``.
    If ``oslo.concurrency`` is installed `get_dataset` becomes thread safe.

    Alternatively, to make this function thread/multiprocessing safe initialize the cache first by
    calling `get_dataset(args)` once before calling `get_dataset(args)` many times in parallel.
    This will initialize the cache and later calls will use the cache in a thread/multiprocessing
    safe way.

    If dataset is retrieved by name, a version may be specified.
    If no version is specified and multiple versions of the dataset exist,
    the earliest version of the dataset that is still active will be returned.
    If no version is specified, multiple versions of the dataset exist and
    ``exception_if_multiple`` is set to ``True``, this function will raise an exception.

    Parameters
    ----------
    dataset_id : int or str
        Dataset ID (integer) or dataset name (string) of the dataset to download.
    download_data : bool (default=False)
        If True, also download the data file. Beware that some datasets are large and it might
        make the operation noticeably slower. Metadata is also still retrieved.
        If False, create the OpenMLDataset and only populate it with the metadata.
        The data may later be retrieved through the `OpenMLDataset.get_data` method.
    version : int, optional (default=None)
        Specifies the version if `dataset_id` is specified by name.
        If no version is specified, retrieve the least recent still active version.
    error_if_multiple : bool (default=False)
        If ``True`` raise an error if multiple datasets are found with matching criteria.
    cache_format : str (default='pickle') in {'pickle', 'feather'}
        Format for caching the dataset - may be 'feather' or 'pickle'.
        Note that the default 'pickle' option may load more slowly than 'feather' when
        the number of rows is very high.
    download_qualities : bool (default=False)
        Option to download 'qualities' meta-data in addition to the minimal dataset description.
        If True, download and cache the qualities file.
        If False, create the OpenMLDataset without qualities metadata. The data may later be added
        to the OpenMLDataset through the `OpenMLDataset.load_metadata(qualities=True)` method.
    download_features_meta_data : bool (default=False)
        Option to download 'features' meta-data in addition to the minimal dataset description.
        If True, download and cache the features file.
        If False, create the OpenMLDataset without features metadata. The data may later be added
        to the OpenMLDataset through the `OpenMLDataset.load_metadata(features=True)` method.
    download_all_files: bool (default=False)
        EXPERIMENTAL. Download all files related to the dataset that reside on the server.
        Useful for datasets which refer to auxiliary files (e.g., meta-album).
    force_refresh_cache : bool (default=False)
        Force the cache to be refreshed by deleting the cache directory and re-downloading the data.
        Note that if `force_refresh_cache` is True, `get_dataset` is NOT thread/multiprocessing safe,
        because deleting and re-creating the cache creates a race condition (as with the cache in
        general).

    Returns
    -------
    dataset : :class:`openml.OpenMLDataset`
        The downloaded dataset.
    """
    if download_all_files:
        warnings.warn(
            "``download_all_files`` is experimental and is likely to break with new releases.",
            FutureWarning,
            stacklevel=2,
        )

    if cache_format not in ["feather", "pickle"]:
        raise ValueError(
            "cache_format must be one of 'feather' or 'pickle. "
            f"Invalid format specified: {cache_format}",
        )

    if isinstance(dataset_id, str):
        try:
            dataset_id = int(dataset_id)
        except ValueError:
            dataset_id = _name_to_id(dataset_id, version, error_if_multiple)  # type: ignore
    elif not isinstance(dataset_id, int):
        raise TypeError(
            f"`dataset_id` must be one of `str` or `int`, not {type(dataset_id)}.",
        )

    if force_refresh_cache:
        did_cache_dir = _get_cache_dir_for_id(DATASETS_CACHE_DIR_NAME, dataset_id)
        if did_cache_dir.exists():
            _remove_cache_dir_for_id(DATASETS_CACHE_DIR_NAME, did_cache_dir)

    did_cache_dir = _create_cache_directory_for_id(
        DATASETS_CACHE_DIR_NAME,
        dataset_id,
    )

    remove_dataset_cache = True
    try:
        description = _get_dataset_description(did_cache_dir, dataset_id)
        features_file = None
        qualities_file = None

        if download_features_meta_data:
            features_file = _get_dataset_features_file(did_cache_dir, dataset_id)
        if download_qualities:
            qualities_file = _get_dataset_qualities_file(did_cache_dir, dataset_id)

        parquet_file = None
        skip_parquet = os.environ.get(OPENML_SKIP_PARQUET_ENV_VAR, "false").casefold() == "true"
        download_parquet = "oml:parquet_url" in description and not skip_parquet
        if download_parquet and (download_data or download_all_files):
            try:
                parquet_file = _get_dataset_parquet(
                    description,
                    download_all_files=download_all_files,
                )
            except urllib3.exceptions.MaxRetryError:
                parquet_file = None

        arff_file = None
        if parquet_file is None and download_data:
            if download_parquet:
                logger.warning("Failed to download parquet, fallback on ARFF.")
            arff_file = _get_dataset_arff(description)

        remove_dataset_cache = False
    except OpenMLServerException as e:
        # if there was an exception
        # check if the user had access to the dataset
        if e.code == NO_ACCESS_GRANTED_ERRCODE:
            raise OpenMLPrivateDatasetError(e.message) from None

        raise e
    finally:
        if remove_dataset_cache:
            _remove_cache_dir_for_id(DATASETS_CACHE_DIR_NAME, did_cache_dir)

    return _create_dataset_from_description(
        description,
        features_file,
        qualities_file,
        arff_file,
        parquet_file,
        cache_format,
    )
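
For orientation, here is a minimal usage sketch (not part of the library source). It assumes the public OpenML server is reachable and uses dataset id 61 ("iris") purely as an example; the OpenMLDataset.get_data call and its return tuple are shown as commonly used, not as the only option.

import openml

# Metadata-only fetch (lazy loading): the data file itself is not downloaded yet.
dataset = openml.datasets.get_dataset(61)
print(dataset.name, dataset.default_target_attribute)

# The data is retrieved on demand via OpenMLDataset.get_data.
X, y, categorical_indicator, attribute_names = dataset.get_data(
    target=dataset.default_target_attribute,
)
print(X.shape, len(attribute_names))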

get_datasets #

get_datasets(dataset_ids: list[str | int], download_data: bool = False, download_qualities: bool = False) -> list[OpenMLDataset]

Download datasets.

This function iterates :meth:openml.datasets.get_dataset.

Parameters#

dataset_ids : iterable
    Integers or strings representing dataset ids or dataset names. If dataset names are specified, the least recent still active dataset version is returned.
download_data : bool, optional (default=False)
    If True, also download the data file. Beware that some datasets are large and it might make the operation noticeably slower. Metadata is still retrieved.
    If False, create the OpenMLDataset and only populate it with the metadata. The data may later be retrieved through the OpenMLDataset.get_data method.
download_qualities : bool, optional (default=False)
    If True, also download the qualities.xml file. If False, skip the qualities.xml file.

Returns#

datasets : list of datasets
    A list of dataset objects.

Source code in openml/datasets/functions.py
def get_datasets(
    dataset_ids: list[str | int],
    download_data: bool = False,  # noqa: FBT001, FBT002
    download_qualities: bool = False,  # noqa: FBT001, FBT002
) -> list[OpenMLDataset]:
    """Download datasets.

    This function iterates :meth:`openml.datasets.get_dataset`.

    Parameters
    ----------
    dataset_ids : iterable
        Integers or strings representing dataset ids or dataset names.
        If dataset names are specified, the least recent still active dataset version is returned.
    download_data : bool, optional
        If True, also download the data file. Beware that some datasets are large and it might
        make the operation noticeably slower. Metadata is also still retrieved.
        If False, create the OpenMLDataset and only populate it with the metadata.
        The data may later be retrieved through the `OpenMLDataset.get_data` method.
    download_qualities : bool, optional (default=False)
        If True, also download the qualities.xml file. If False, skip the qualities.xml file.

    Returns
    -------
    datasets : list of datasets
        A list of dataset objects.
    """
    datasets = []
    for dataset_id in dataset_ids:
        datasets.append(
            get_dataset(dataset_id, download_data, download_qualities=download_qualities),
        )
    return datasets
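
A short usage sketch follows; it is not part of the library source. Ids and names can be mixed in the list, and "credit-g" is used here purely as an example dataset name.

import openml

# Fetch several datasets in one call; only metadata is downloaded here
# because download_data defaults to False.
datasets = openml.datasets.get_datasets([61, "credit-g"], download_qualities=True)
for ds in datasets:
    print(ds.dataset_id, ds.name)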

list_datasets #

list_datasets(data_id: list[int] | None = None, offset: int | None = None, size: int | None = None, status: str | None = None, tag: str | None = None, data_name: str | None = None, data_version: int | None = None, number_instances: int | str | None = None, number_features: int | str | None = None, number_classes: int | str | None = None, number_missing_values: int | str | None = None) -> DataFrame

Return a dataframe of all datasets which are on OpenML.

Supports a large number of results.

Parameters#

data_id : list, optional
    A list of data ids, to specify which datasets should be listed.
offset : int, optional
    The number of datasets to skip, starting from the first.
size : int, optional
    The maximum number of datasets to show.
status : str, optional
    One of {active, in_preparation, deactivated}. By default active datasets are returned, but datasets with another status can also be requested.
tag : str, optional
data_name : str, optional
data_version : int, optional
number_instances : int | str, optional
number_features : int | str, optional
number_classes : int | str, optional
number_missing_values : int | str, optional

Returns#

datasets : dataframe
    Each row maps to a dataset. Each column contains the following information:
    - dataset id
    - name
    - format
    - status
    If qualities are calculated for the dataset, some of these are also included as columns.

Source code in openml/datasets/functions.py
def list_datasets(
    data_id: list[int] | None = None,
    offset: int | None = None,
    size: int | None = None,
    status: str | None = None,
    tag: str | None = None,
    data_name: str | None = None,
    data_version: int | None = None,
    number_instances: int | str | None = None,
    number_features: int | str | None = None,
    number_classes: int | str | None = None,
    number_missing_values: int | str | None = None,
) -> pd.DataFrame:
    """Return a dataframe of all dataset which are on OpenML.

    Supports large amount of results.

    Parameters
    ----------
    data_id : list, optional
        A list of data ids, to specify which datasets should be
        listed
    offset : int, optional
        The number of datasets to skip, starting from the first.
    size : int, optional
        The maximum number of datasets to show.
    status : str, optional
        One of {active, in_preparation, deactivated}. By default
        active datasets are returned, but datasets with another
        status can also be requested.
    tag : str, optional
    data_name : str, optional
    data_version : int, optional
    number_instances : int | str, optional
    number_features : int | str, optional
    number_classes : int | str, optional
    number_missing_values : int | str, optional

    Returns
    -------
    datasets: dataframe
        Each row maps to a dataset
        Each column contains the following information:
        - dataset id
        - name
        - format
        - status
        If qualities are calculated for the dataset, some of
        these are also included as columns.
    """
    listing_call = partial(
        _list_datasets,
        data_id=data_id,
        status=status,
        tag=tag,
        data_name=data_name,
        data_version=data_version,
        number_instances=number_instances,
        number_features=number_features,
        number_classes=number_classes,
        number_missing_values=number_missing_values,
    )
    batches = openml.utils._list_all(listing_call, offset=offset, limit=size)
    if len(batches) == 0:
        return pd.DataFrame()

    return pd.concat(batches)
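
The sketch below illustrates the listing call; it is not part of the library source. The range string "150..1000" is an assumption based on the int | str type of the filter parameters (the OpenML listing API accepts such ranges), and the exact column names in the returned dataframe depend on the server response.

import openml

# List up to 20 active datasets whose number of instances falls in a range.
datasets_df = openml.datasets.list_datasets(
    status="active",
    size=20,
    number_instances="150..1000",
)
print(len(datasets_df), "datasets matched")
print(datasets_df.head())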

list_qualities #

list_qualities() -> list[str]

Return list of data qualities available.

The function performs an API call to retrieve the entire list of data qualities that are computed on the datasets uploaded.

Returns#

list

Source code in openml/datasets/functions.py
def list_qualities() -> list[str]:
    """Return list of data qualities available.

    The function performs an API call to retrieve the entire list of
    data qualities that are computed on the datasets uploaded.

    Returns
    -------
    list
    """
    api_call = "data/qualities/list"
    xml_string = openml._api_calls._perform_api_call(api_call, "get")
    qualities = xmltodict.parse(xml_string, force_list=("oml:quality"))
    # Minimalistic check if the XML is useful
    if "oml:data_qualities_list" not in qualities:
        raise ValueError('Error in return XML, does not contain "oml:data_qualities_list"')

    if not isinstance(qualities["oml:data_qualities_list"]["oml:quality"], list):
        raise TypeError('Error in return XML, does not contain "oml:quality" as a list')

    return qualities["oml:data_qualities_list"]["oml:quality"]
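
A minimal call, for illustration only; it returns the quality names as plain strings (for example 'NumberOfInstances').

import openml

qualities = openml.datasets.list_qualities()
print(len(qualities), "qualities available")
print(qualities[:5])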

status_update #

status_update(data_id: int, status: Literal['active', 'deactivated']) -> None

Updates the status of a dataset to either 'active' or 'deactivated'. Please see the OpenML API documentation for a description of the status and all legal status transitions: docs.openml.org/concepts/data/#dataset-status

Parameters#

data_id : int
    The data id of the dataset.
status : str
    Either 'active' or 'deactivated'.

Source code in openml/datasets/functions.py
def status_update(data_id: int, status: Literal["active", "deactivated"]) -> None:
    """
    Updates the status of a dataset to either 'active' or 'deactivated'.
    Please see the OpenML API documentation for a description of the status
    and all legal status transitions:
    https://docs.openml.org/concepts/data/#dataset-status

    Parameters
    ----------
    data_id : int
        The data id of the dataset
    status : str,
        'active' or 'deactivated'
    """
    legal_status = {"active", "deactivated"}
    if status not in legal_status:
        raise ValueError(f"Illegal status value. Legal values: {legal_status}")

    data: openml._api_calls.DATA_TYPE = {"data_id": data_id, "status": status}
    result_xml = openml._api_calls._perform_api_call("data/status/update", "post", data=data)
    result = xmltodict.parse(result_xml)
    server_data_id = result["oml:data_status_update"]["oml:id"]
    server_status = result["oml:data_status_update"]["oml:status"]
    if status != server_status or int(data_id) != int(server_data_id):
        # This should never happen
        raise ValueError("Data id/status returned by the server does not match the request")
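
A usage sketch, not part of the library source: updating a dataset's status is a write operation, so it assumes a configured API key and sufficient rights on the dataset. The dataset id below is purely illustrative.

import openml

# Configure credentials before performing write operations.
openml.config.apikey = "YOUR_API_KEY"

# Deactivate a dataset you own (see the legal status transitions linked above).
openml.datasets.status_update(data_id=12345, status="deactivated")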