Module astrapy.info

Expand source code
# Copyright DataStax, Inc.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.

from __future__ import annotations

from astrapy.data.info.collection_descriptor import (
    CollectionDefaultIDOptions,
    CollectionDefinition,
    CollectionDescriptor,
    CollectionInfo,
    CollectionVectorOptions,
)
from astrapy.data.info.database_info import (
    AstraDBAdminDatabaseInfo,
    AstraDBDatabaseInfo,
)
from astrapy.data.info.table_descriptor.table_altering import (
    AlterTableAddColumns,
    AlterTableAddVectorize,
    AlterTableDropColumns,
    AlterTableDropVectorize,
)
from astrapy.data.info.table_descriptor.table_columns import (
    TableAPISupportDescriptor,
    TableKeyValuedColumnTypeDescriptor,
    TablePrimaryKeyDescriptor,
    TableScalarColumnTypeDescriptor,
    TableUnsupportedColumnTypeDescriptor,
    TableValuedColumnTypeDescriptor,
    TableVectorColumnTypeDescriptor,
)
from astrapy.data.info.table_descriptor.table_creation import (
    CreateTableDefinition,
)
from astrapy.data.info.table_descriptor.table_indexes import (
    TableAPIIndexSupportDescriptor,
    TableBaseIndexDefinition,
    TableIndexDefinition,
    TableIndexDescriptor,
    TableIndexOptions,
    TableUnsupportedIndexDefinition,
    TableVectorIndexDefinition,
    TableVectorIndexOptions,
)
from astrapy.data.info.table_descriptor.table_listing import (
    ListTableDefinition,
    ListTableDescriptor,
    TableInfo,
)
from astrapy.data.info.vectorize import (
    EmbeddingProvider,
    EmbeddingProviderAuthentication,
    EmbeddingProviderModel,
    EmbeddingProviderParameter,
    EmbeddingProviderToken,
    FindEmbeddingProvidersResult,
    VectorServiceOptions,
)
from astrapy.data.utils.table_types import (
    ColumnType,
    TableKeyValuedColumnType,
    TableValuedColumnType,
)

# Explicit public API of astrapy.info, kept in alphabetical order.
__all__ = [
    "AlterTableAddColumns",
    "AlterTableAddVectorize",
    "AlterTableDropColumns",
    "AlterTableDropVectorize",
    "AstraDBAdminDatabaseInfo",
    "AstraDBDatabaseInfo",
    "CollectionDefaultIDOptions",
    "CollectionDefinition",
    "CollectionDescriptor",
    "CollectionInfo",
    "CollectionVectorOptions",
    "ColumnType",
    "CreateTableDefinition",
    "EmbeddingProvider",
    "EmbeddingProviderAuthentication",
    "EmbeddingProviderModel",
    "EmbeddingProviderParameter",
    "EmbeddingProviderToken",
    "FindEmbeddingProvidersResult",
    "ListTableDefinition",
    "ListTableDescriptor",
    "TableAPIIndexSupportDescriptor",
    "TableAPISupportDescriptor",
    "TableBaseIndexDefinition",
    "TableIndexDefinition",
    "TableIndexDescriptor",
    "TableIndexOptions",
    "TableInfo",
    "TableKeyValuedColumnType",
    "TableKeyValuedColumnTypeDescriptor",
    "TablePrimaryKeyDescriptor",
    "TableScalarColumnTypeDescriptor",
    "TableUnsupportedColumnTypeDescriptor",
    "TableUnsupportedIndexDefinition",
    "TableValuedColumnType",
    "TableValuedColumnTypeDescriptor",
    "TableVectorColumnTypeDescriptor",
    "TableVectorIndexDefinition",
    "TableVectorIndexOptions",
    "VectorServiceOptions",
]

Classes

class AlterTableAddColumns (*, columns: dict[str, TableColumnTypeDescriptor])

An object representing the alter-table operation of adding column(s), for use as argument to the table's alter() method.

Attributes

columns
a mapping between the names of the columns to add and TableColumnTypeDescriptor objects, formatted in the same way as the columns attribute of CreateTableDefinition.
Expand source code
@dataclass
class AlterTableAddColumns(AlterTableOperation):
    """
    An object representing the alter-table operation of adding column(s),
    for use as argument to the table's `alter()` method.

    Attributes:
        columns: a mapping between the names of the columns to add and
            `TableColumnTypeDescriptor` objects, formatted in the same way as
            the `columns` attribute of `CreateTableDefinition`.
    """

    columns: dict[str, TableColumnTypeDescriptor]

    def __init__(self, *, columns: dict[str, TableColumnTypeDescriptor]) -> None:
        # Operation name used as the key in the alterTable command payload.
        self._name = "add"
        self.columns = columns

    def __repr__(self) -> str:
        joined_names = ",".join(self.columns.keys())
        return f"{self.__class__.__name__}(columns=[{joined_names}])"

    def as_dict(self) -> dict[str, Any]:
        """Recast this object into a dictionary."""
        serialized_columns = {
            name: descriptor.as_dict() for name, descriptor in self.columns.items()
        }
        return {"columns": serialized_columns}

    @classmethod
    def _from_dict(cls, raw_dict: dict[str, Any]) -> AlterTableAddColumns:
        """
        Create an instance of AlterTableAddColumns from a dictionary
        such as one suitable as (partial) command payload.
        """

        _warn_residual_keys(cls, raw_dict, {"columns"})
        coerced_columns = {
            name: TableColumnTypeDescriptor.coerce(value)
            for name, value in raw_dict["columns"].items()
        }
        return AlterTableAddColumns(columns=coerced_columns)

    @classmethod
    def coerce(
        cls, raw_input: AlterTableAddColumns | dict[str, Any]
    ) -> AlterTableAddColumns:
        """
        Normalize the input, whether an object already or a plain dictionary
        of the right structure, into an AlterTableAddColumns.
        """

        # Instances pass through untouched; anything else is parsed as a dict.
        if not isinstance(raw_input, AlterTableAddColumns):
            return cls._from_dict(raw_input)
        return raw_input

Ancestors

Class variables

var columns : dict[str, TableColumnTypeDescriptor]

Static methods

def coerce(raw_input: AlterTableAddColumns | dict[str, Any]) ‑> AlterTableAddColumns

Normalize the input, whether an object already or a plain dictionary of the right structure, into an AlterTableAddColumns.

Expand source code
@classmethod
def coerce(
    cls, raw_input: AlterTableAddColumns | dict[str, Any]
) -> AlterTableAddColumns:
    """
    Normalize the input, whether an object already or a plain dictionary
    of the right structure, into an AlterTableAddColumns.
    """

    # Instances pass through untouched; anything else is parsed as a dict.
    if not isinstance(raw_input, AlterTableAddColumns):
        return cls._from_dict(raw_input)
    return raw_input

Methods

def as_dict(self) ‑> dict[str, typing.Any]

Recast this object into a dictionary.

Expand source code
def as_dict(self) -> dict[str, Any]:
    """Recast this object into a dictionary."""
    # Serialize each column descriptor; keys are the column names.
    serialized = {name: descriptor.as_dict() for name, descriptor in self.columns.items()}
    return {"columns": serialized}

Inherited members

class AlterTableAddVectorize (*, columns: dict[str, VectorServiceOptions])

An object representing the alter-table operation of enabling the vectorize service (i.e. server-side embedding computation) on one or more columns, for use as argument to the table's alter() method.

Attributes

columns
a mapping between column names and the corresponding VectorServiceOptions objects describing the settings for the desired vectorize service.
Expand source code
@dataclass
class AlterTableAddVectorize(AlterTableOperation):
    """
    An object representing the alter-table operation of enabling the vectorize service
    (i.e. server-side embedding computation) on one or more columns,
    for use as argument to the table's `alter()` method.

    Attributes:
        columns: a mapping between column names and the corresponding
            `VectorServiceOptions` objects describing the settings for the
            desired vectorize service.
    """

    columns: dict[str, VectorServiceOptions]

    def __init__(self, *, columns: dict[str, VectorServiceOptions]) -> None:
        # Operation name used as the key in the alterTable command payload.
        self._name = "addVectorize"
        self.columns = columns

    def __repr__(self) -> str:
        descriptions = [
            f"{name}({service.provider}/{service.model_name})"
            for name, service in self.columns.items()
        ]
        return f"{self.__class__.__name__}(columns={', '.join(descriptions)})"

    def as_dict(self) -> dict[str, Any]:
        """Recast this object into a dictionary."""
        serialized = {
            name: service.as_dict() for name, service in self.columns.items()
        }
        return {"columns": serialized}

    @classmethod
    def _from_dict(cls, raw_dict: dict[str, Any]) -> AlterTableAddVectorize:
        """
        Create an instance of AlterTableAddVectorize from a dictionary
        such as one suitable as (partial) command payload.
        """
        _warn_residual_keys(cls, raw_dict, {"columns"})
        coerced: dict[str, VectorServiceOptions | None] = {
            name: VectorServiceOptions.coerce(value)
            for name, value in raw_dict["columns"].items()
        }
        # A None entry means a column carried no usable service definition.
        if any(service is None for service in coerced.values()):
            raise ValueError(
                "Vector service definition cannot be None for AlterTableAddVectorize"
            )
        # The None check above justifies narrowing the value type for the checker.
        return AlterTableAddVectorize(
            columns=cast(dict[str, VectorServiceOptions], coerced)
        )

    @classmethod
    def coerce(
        cls, raw_input: AlterTableAddVectorize | dict[str, Any]
    ) -> AlterTableAddVectorize:
        """
        Normalize the input, whether an object already or a plain dictionary
        of the right structure, into an AlterTableAddVectorize.
        """

        # Instances pass through untouched; anything else is parsed as a dict.
        if not isinstance(raw_input, AlterTableAddVectorize):
            return cls._from_dict(raw_input)
        return raw_input

Ancestors

Class variables

var columns : dict[str, VectorServiceOptions]

Static methods

def coerce(raw_input: AlterTableAddVectorize | dict[str, Any]) ‑> AlterTableAddVectorize

Normalize the input, whether an object already or a plain dictionary of the right structure, into an AlterTableAddVectorize.

Expand source code
@classmethod
def coerce(
    cls, raw_input: AlterTableAddVectorize | dict[str, Any]
) -> AlterTableAddVectorize:
    """
    Normalize the input, whether an object already or a plain dictionary
    of the right structure, into an AlterTableAddVectorize.
    """

    # Instances pass through untouched; anything else is parsed as a dict.
    if not isinstance(raw_input, AlterTableAddVectorize):
        return cls._from_dict(raw_input)
    return raw_input

Methods

def as_dict(self) ‑> dict[str, typing.Any]

Recast this object into a dictionary.

Expand source code
def as_dict(self) -> dict[str, Any]:
    """Recast this object into a dictionary."""
    # Serialize each vectorize-service definition, keyed by column name.
    serialized = {
        name: service.as_dict() for name, service in self.columns.items()
    }
    return {"columns": serialized}

Inherited members

class AlterTableDropColumns (*, columns: list[str])

An object representing the alter-table operation of dropping column(s), for use as argument to the table's alter() method.

Attributes

columns
a list of the column names to drop.
Expand source code
@dataclass
class AlterTableDropColumns(AlterTableOperation):
    """
    An object representing the alter-table operation of dropping column(s),
    for use as argument to the table's `alter()` method.

    Attributes:
        columns: a list of the column names to drop.
    """

    columns: list[str]

    def __init__(self, *, columns: list[str]) -> None:
        # Operation name used as the key in the alterTable command payload.
        self._name = "drop"
        self.columns = columns

    def __repr__(self) -> str:
        joined_names = ",".join(self.columns)
        return f"{self.__class__.__name__}(columns=[{joined_names}])"

    def as_dict(self) -> dict[str, Any]:
        """Recast this object into a dictionary."""
        payload: dict[str, Any] = {"columns": self.columns}
        return payload

    @classmethod
    def _from_dict(cls, raw_dict: dict[str, Any]) -> AlterTableDropColumns:
        """
        Create an instance of AlterTableDropColumns from a dictionary
        such as one suitable as (partial) command payload.
        """
        _warn_residual_keys(cls, raw_dict, {"columns"})
        return AlterTableDropColumns(columns=raw_dict["columns"])

    @classmethod
    def coerce(
        cls, raw_input: AlterTableDropColumns | dict[str, Any]
    ) -> AlterTableDropColumns:
        """
        Normalize the input, whether an object already or a plain dictionary
        of the right structure, into an AlterTableDropColumns.
        """

        # Instances pass through untouched; anything else is parsed as a dict.
        if not isinstance(raw_input, AlterTableDropColumns):
            return cls._from_dict(raw_input)
        return raw_input

Ancestors

Class variables

var columns : list[str]

Static methods

def coerce(raw_input: AlterTableDropColumns | dict[str, Any]) ‑> AlterTableDropColumns

Normalize the input, whether an object already or a plain dictionary of the right structure, into an AlterTableDropColumns.

Expand source code
@classmethod
def coerce(
    cls, raw_input: AlterTableDropColumns | dict[str, Any]
) -> AlterTableDropColumns:
    """
    Normalize the input, whether an object already or a plain dictionary
    of the right structure, into an AlterTableDropColumns.
    """

    # Instances pass through untouched; anything else is parsed as a dict.
    if not isinstance(raw_input, AlterTableDropColumns):
        return cls._from_dict(raw_input)
    return raw_input

Methods

def as_dict(self) ‑> dict[str, typing.Any]

Recast this object into a dictionary.

Expand source code
def as_dict(self) -> dict[str, Any]:
    """Recast this object into a dictionary."""
    # The list is returned as-is (no copy), matching the original contract.
    payload: dict[str, Any] = {"columns": self.columns}
    return payload

Inherited members

class AlterTableDropVectorize (*, columns: list[str])

An object representing the alter-table operation of removing the vectorize service (i.e. the server-side embedding computation) from one or more columns, for use as argument to the table's alter() method.

Note: this operation does not drop the column, simply unsets its vectorize service. Existing embedding vectors, stored in the table, are retained.

Attributes

columns
a list of the column names whose vectorize service is to be removed.
Expand source code
@dataclass
class AlterTableDropVectorize(AlterTableOperation):
    """
    An object representing the alter-table operation of removing the vectorize
    service (i.e. the server-side embedding computation) from one or more columns,
    for use as argument to the table's `alter()` method.

    Note: this operation does not drop the column, simply unsets its vectorize
    service. Existing embedding vectors, stored in the table, are retained.

    Attributes:
        columns: a list of the column names whose vectorize service is to be removed.
    """

    columns: list[str]

    def __init__(self, *, columns: list[str]) -> None:
        # Operation name used as the key in the alterTable command payload.
        self._name = "dropVectorize"
        self.columns = columns

    def __repr__(self) -> str:
        joined_names = ",".join(self.columns)
        return f"{self.__class__.__name__}(columns=[{joined_names}])"

    def as_dict(self) -> dict[str, Any]:
        """Recast this object into a dictionary."""
        payload: dict[str, Any] = {"columns": self.columns}
        return payload

    @classmethod
    def _from_dict(cls, raw_dict: dict[str, Any]) -> AlterTableDropVectorize:
        """
        Create an instance of AlterTableDropVectorize from a dictionary
        such as one suitable as (partial) command payload.
        """
        _warn_residual_keys(cls, raw_dict, {"columns"})
        return AlterTableDropVectorize(columns=raw_dict["columns"])

    @classmethod
    def coerce(
        cls, raw_input: AlterTableDropVectorize | dict[str, Any]
    ) -> AlterTableDropVectorize:
        """
        Normalize the input, whether an object already or a plain dictionary
        of the right structure, into an AlterTableDropVectorize.
        """

        # Instances pass through untouched; anything else is parsed as a dict.
        if not isinstance(raw_input, AlterTableDropVectorize):
            return cls._from_dict(raw_input)
        return raw_input

Ancestors

Class variables

var columns : list[str]

Static methods

def coerce(raw_input: AlterTableDropVectorize | dict[str, Any]) ‑> AlterTableDropVectorize

Normalize the input, whether an object already or a plain dictionary of the right structure, into an AlterTableDropVectorize.

Expand source code
@classmethod
def coerce(
    cls, raw_input: AlterTableDropVectorize | dict[str, Any]
) -> AlterTableDropVectorize:
    """
    Normalize the input, whether an object already or a plain dictionary
    of the right structure, into an AlterTableDropVectorize.
    """

    # Instances pass through untouched; anything else is parsed as a dict.
    if not isinstance(raw_input, AlterTableDropVectorize):
        return cls._from_dict(raw_input)
    return raw_input

Methods

def as_dict(self) ‑> dict[str, typing.Any]

Recast this object into a dictionary.

Expand source code
def as_dict(self) -> dict[str, Any]:
    """Recast this object into a dictionary."""
    # The list is returned as-is (no copy), matching the original contract.
    payload: dict[str, Any] = {"columns": self.columns}
    return payload

Inherited members

class AstraDBAdminDatabaseInfo (*, environment: str, raw_dict: dict[str, Any])

A class representing the information of an Astra DB database, including region details. This is the type of the response from the AstraDBDatabaseAdmin info method.

Note

This class, if applicable, describes a multi-region database in all its regions, as opposed to the AstraDBDatabaseInfo.

Attributes

id
the Database ID, in the form of a UUID string with dashes. Example: "01234567-89ab-cdef-0123-456789abcdef".
name
the name of the database as set by the user at creation time. The database name is not necessarily unique across databases in an org.
keyspaces
A list of the keyspaces available in the database.
status
A string describing the current status of the database. Example values are: "ACTIVE", "MAINTENANCE", "INITIALIZING", and others (see the DevOps API documentation for more on database statuses).
environment
a string identifying the environment for the database. In the typical usage, this equals "prod".
cloud_provider
a string describing the cloud provider hosting the database.
raw
a dictionary containing the full response from the DevOps API call to obtain the database information.
created_at
information about when the database has been created.
last_used
information about when the database was accessed last.
org_id
the ID of the Astra organization the database belongs to, in the form of a UUID string with dashes.
owner_id
the ID of the Astra account owning the database, in the form of a UUID string with dashes.
regions
a list of AstraDBAdminDatabaseRegionInfo objects, one for each of the regions the database is replicated to.

Note

The raw_info dictionary usually has a region key describing the default region as configured in the database, which does not necessarily (for multi-region databases) match the region through which the connection is established: the latter is the one specified by the "api endpoint" used for connecting. In other words, for multi-region databases it is possible that database_info.region != database_info.raw_info["region"]. Conversely, in case of an AstraDBDatabaseInfo not obtained through a connected database, such as when calling Admin.list_databases(), all fields except environment (e.g. keyspace, region, etc.) are set as found on the DevOps API response directly.

Expand source code
@dataclass
class AstraDBAdminDatabaseInfo(_BaseAstraDBDatabaseInfo):
    """
    A class representing the information of an Astra DB database, including
    region details. This is the type of the response from the AstraDBDatabaseAdmin
    `info` method.

    Note:
        This class, if applicable, describes a multi-region database in all its
        regions, as opposed to the `AstraDBDatabaseInfo`.

    Attributes:
        id: the Database ID, in the form of a UUID string with dashes. Example:
            "01234567-89ab-cdef-0123-456789abcdef".
        name: the name of the database as set by the user at creation time.
            The database name is not necessarily unique across databases in an org.
        keyspaces: A list of the keyspaces available in the database.
        status: A string describing the current status of the database. Example values
            are: "ACTIVE", "MAINTENANCE", "INITIALIZING", and others (see
            the DevOps API documentation for more on database statuses).
        environment: a string identifying the environment for the database. In the
            typical usage, this equals "prod".
        cloud_provider: a string describing the cloud provider hosting the database.
        raw: a dictionary containing the full response from the DevOps API call
            to obtain the database information.
        created_at: information about when the database has been created.
        last_used: information about when the database was accessed last.
        org_id: the ID of the Astra organization the database belongs to,
            in the form of a UUID string with dashes.
        owner_id: the ID of the Astra account owning the database, in the form
            of a UUID string with dashes.
        regions: a list of `AstraDBAdminDatabaseRegionInfo` objects, one for each of
            the regions the database is replicated to.

    Note:
        The `raw_info` dictionary usually has a `region` key describing
        the default region as configured in the database, which does not
        necessarily (for multi-region databases) match the region through
        which the connection is established: the latter is the one specified
        by the "api endpoint" used for connecting. In other words, for multi-region
        databases it is possible that
        `database_info.region != database_info.raw_info["region"]`.
        Conversely, in case of an AstraDBDatabaseInfo not obtained through a
        connected database, such as when calling `Admin.list_databases()`,
        all fields except `environment` (e.g. keyspace, region, etc)
        are set as found on the DevOps API response directly.
    """

    created_at: datetime.datetime | None
    last_used: datetime.datetime | None
    org_id: str
    owner_id: str
    regions: list[AstraDBAdminDatabaseRegionInfo]

    def __init__(
        self,
        *,
        environment: str,
        raw_dict: dict[str, Any],
    ) -> None:
        # Timestamp fields are fetched with .get(): presumably they may be
        # absent from the DevOps payload, and the "failsafe" parser is expected
        # to yield None rather than raise on missing/unparsable input —
        # TODO confirm against _failsafe_parse_date.
        self.created_at = _failsafe_parse_date(raw_dict.get("creationTime"))
        self.last_used = _failsafe_parse_date(raw_dict.get("lastUsageTime"))
        # Org/owner IDs are mandatory: a missing key raises KeyError here.
        self.org_id = raw_dict["orgId"]
        self.owner_id = raw_dict["ownerId"]
        # Delegate the remaining (shared) fields to the base-class initializer.
        _BaseAstraDBDatabaseInfo.__init__(
            self=self,
            environment=environment,
            raw_dict=raw_dict,
        )
        # Build one region-info entry per datacenter in the DevOps response.
        # This must run after the base init above: self.id is not set in this
        # method, so it relies on the base class having populated it.
        self.regions = [
            AstraDBAdminDatabaseRegionInfo(
                raw_datacenter_dict=raw_datacenter_dict,
                environment=environment,
                database_id=self.id,
            )
            for raw_datacenter_dict in raw_dict["info"]["datacenters"]
        ]

    def __repr__(self) -> str:
        # Combine the base-class field description with the admin-specific
        # fields; the raw payload is elided ("raw=...") for readability.
        pieces = [
            _BaseAstraDBDatabaseInfo._inner_desc(self),
            f"created_at={self.created_at}",
            f"last_used={self.last_used}",
            f"org_id={self.org_id}",
            f"owner_id={self.owner_id}",
            f"regions={self.regions}",
            "raw=...",
        ]
        return f"{self.__class__.__name__}({', '.join(pieces)})"

Ancestors

  • astrapy.data.info.database_info._BaseAstraDBDatabaseInfo

Class variables

var created_at : datetime.datetime | None
var last_used : datetime.datetime | None
var org_id : str
var owner_id : str
var regions : list[AstraDBAdminDatabaseRegionInfo]
class AstraDBDatabaseInfo (*, environment: str, api_endpoint: str, raw_dict: dict[str, Any])

A class representing the information of an Astra DB database, including region details. This is the type of the response from the Database info method.

Note

a database can in general be replicated across multiple regions, in an active/active manner. Yet, when connecting to it, one always explicitly specifies a certain region: in other words, the connection (as represented by the Database class and analogous) is always done to a specific region. In this sense, this class represents the notion of "a database reached from a certain region". See class AstraDBAdminDatabaseInfo for (possibly) multi-region database information.

Attributes

id
the Database ID, in the form of a UUID string with dashes. Example: "01234567-89ab-cdef-0123-456789abcdef".
name
the name of the database as set by the user at creation time. The database name is not necessarily unique across databases in an org.
keyspaces
A list of the keyspaces available in the database.
status
A string describing the current status of the database. Example values are: "ACTIVE", "MAINTENANCE", "INITIALIZING", and others (see the DevOps API documentation for more on database statuses).
environment
a string identifying the environment for the database. In the typical usage, this equals "prod".
cloud_provider
a string describing the cloud provider hosting the database.
raw
a dictionary containing the full response from the DevOps API call to obtain the database information.
region
the region this database is accessed through.
api_endpoint
the API Endpoint used to connect to this database in this region.

Note

The raw_info dictionary usually has a region key describing the default region as configured in the database, which does not necessarily (for multi-region databases) match the region through which the connection is established: the latter is the one specified by the "api endpoint" used for connecting. In other words, for multi-region databases it is possible that database_info.region != database_info.raw_info["region"]. Conversely, in case of an AstraDBDatabaseInfo not obtained through a connected database, such as when calling Admin.list_databases(), all fields except environment (e.g. keyspace, region, etc.) are set as found on the DevOps API response directly.

Expand source code
@dataclass
class AstraDBDatabaseInfo(_BaseAstraDBDatabaseInfo):
    """
    A class representing the information of an Astra DB database, including
    region details. This is the type of the response from the Database `info`
    method.

    Note:
        a database can in general be replicated across multiple regions, in an
        active/active manner. Yet, when connecting to it, one always explicitly
        specifies a certain region: in other words, the connection (as represented
        by the `Database` class and analogous) is always done to a specific region.
        In this sense, this class represents the notion of "a database reached from
        a certain region". See class `AstraDBAdminDatabaseInfo` for (possibly)
        multi-region database information.

    Attributes:
        id: the Database ID, in the form of a UUID string with dashes. Example:
            "01234567-89ab-cdef-0123-456789abcdef".
        name: the name of the database as set by the user at creation time.
            The database name is not necessarily unique across databases in an org.
        keyspaces: A list of the keyspaces available in the database.
        status: A string describing the current status of the database. Example values
            are: "ACTIVE", "MAINTENANCE", "INITIALIZING", and others (see
            the DevOps API documentation for more on database statuses).
        environment: a string identifying the environment for the database. In the
            typical usage, this equals "prod".
        cloud_provider: a string describing the cloud provider hosting the database.
        raw: a dictionary containing the full response from the DevOps API call
            to obtain the database information.
        region: the region this database is accessed through.
        api_endpoint: the API Endpoint used to connect to this database in this region.

    Note:
        The `raw_info` dictionary usually has a `region` key describing
        the default region as configured in the database, which does not
        necessarily (for multi-region databases) match the region through
        which the connection is established: the latter is the one specified
        by the "api endpoint" used for connecting. In other words, for multi-region
        databases it is possible that
        `database_info.region != database_info.raw_info["region"]`.
        Conversely, in case of an AstraDBDatabaseInfo not obtained through a
        connected database, such as when calling `Admin.list_databases()`,
        all fields except `environment` (e.g. keyspace, region, etc)
        are set as found on the DevOps API response directly.
    """

    region: str
    api_endpoint: str

    def __init__(
        self,
        *,
        environment: str,
        api_endpoint: str,
        raw_dict: dict[str, Any],
    ) -> None:
        self.api_endpoint = api_endpoint
        # The region is derived from the endpoint; an unparseable endpoint
        # yields an empty region string.
        parsed_endpoint = parse_api_endpoint(self.api_endpoint)
        self.region = parsed_endpoint.region if parsed_endpoint is not None else ""
        # Delegate the remaining (shared) fields to the base-class initializer.
        _BaseAstraDBDatabaseInfo.__init__(
            self=self,
            environment=environment,
            raw_dict=raw_dict,
        )

    def __repr__(self) -> str:
        # Combine the base-class field description with the region-specific
        # fields; the raw payload is elided ("raw=...") for readability.
        parts = [
            _BaseAstraDBDatabaseInfo._inner_desc(self),
            f"region={self.region}",
            f"api_endpoint={self.api_endpoint}",
            "raw=...",
        ]
        return f"{self.__class__.__name__}({', '.join(parts)})"

Ancestors

  • astrapy.data.info.database_info._BaseAstraDBDatabaseInfo

Class variables

var api_endpoint : str
var region : str
class CollectionDefaultIDOptions (default_id_type: str)

The "defaultId" component of the collection options. See the Data API specifications for allowed values.

Attributes

default_id_type
this setting determines what type of IDs the Data API will generate when inserting documents that do not specify their _id field explicitly. Can be set to any of the values DefaultIdType.UUID, DefaultIdType.OBJECTID, DefaultIdType.UUIDV6, DefaultIdType.UUIDV7, DefaultIdType.DEFAULT.
Expand source code
@dataclass
class CollectionDefaultIDOptions:
    """
    The "defaultId" component of the collection options.
    See the Data API specifications for allowed values.

    Attributes:
        default_id_type: this setting determines what type of IDs the Data API will
            generate when inserting documents that do not specify their
            `_id` field explicitly. Can be set to any of the values
            `DefaultIdType.UUID`, `DefaultIdType.OBJECTID`,
            `DefaultIdType.UUIDV6`, `DefaultIdType.UUIDV7`,
            `DefaultIdType.DEFAULT`.
    """

    default_id_type: str

    def as_dict(self) -> dict[str, Any]:
        """Recast this object into a dictionary."""

        # The API expects the key "type" rather than the attribute name.
        return {"type": self.default_id_type}

    @staticmethod
    def _from_dict(
        raw_dict: dict[str, Any] | None,
    ) -> CollectionDefaultIDOptions | None:
        """
        Create an instance of CollectionDefaultIDOptions from a dictionary
        such as one from the Data API.
        """

        # A missing options dictionary maps to no options at all.
        if raw_dict is None:
            return None
        return CollectionDefaultIDOptions(default_id_type=raw_dict["type"])

Class variables

var default_id_type : str

Methods

def as_dict(self) ‑> dict[str, typing.Any]

Recast this object into a dictionary.

Expand source code
def as_dict(self) -> dict[str, Any]:
    """Recast this object into a dictionary."""

    # The API expects the key "type" rather than the attribute name.
    payload: dict[str, Any] = {"type": self.default_id_type}
    return payload
class CollectionDefinition (vector: CollectionVectorOptions | None = None, indexing: dict[str, Any] | None = None, default_id: CollectionDefaultIDOptions | None = None)

A structure expressing the options of a collection. See the Data API specifications for detailed specification and allowed values.

Instances of this object can be created in three ways: using a fluent interface, passing a fully-formed definition to the class constructor, or coercing an appropriately-shaped plain dictionary into this class. See the examples below and the Collection documentation for more details.

Attributes

vector
an optional CollectionVectorOptions object.
indexing
an optional dictionary with the "indexing" collection properties. This is in the form of a dictionary such as {"deny": [...]} or {"allow": [...]}, with a list of document paths, or alternatively just ["*"], to exclude from/include in collection indexing, respectively.
default_id
an optional CollectionDefaultIDOptions object (see that class for details).

Example

>>> from astrapy.constants import VectorMetric
>>> from astrapy.info import CollectionDefinition, CollectionVectorOptions
>>>
>>> # Create a collection definition with the fluent interface:
>>> collection_definition = (
...     CollectionDefinition.builder()
...     .set_vector_dimension(3)
...     .set_vector_metric(VectorMetric.DOT_PRODUCT)
...     .set_indexing("deny", ["annotations", "logs"])
...     .build()
... )
>>>
>>> # Create a collection definition passing everything to the constructor:
>>> collection_definition_1 = CollectionDefinition(
...     vector=CollectionVectorOptions(
...         dimension=3,
...         metric=VectorMetric.DOT_PRODUCT,
...     ),
...     indexing={"deny": ["annotations", "logs"]},
... )
>>>
>>> # Coerce a dictionary into a collection definition:
>>> collection_definition_2_dict = {
...     "indexing": {"deny": ["annotations", "logs"]},
...     "vector": {
...         "dimension": 3,
...         "metric": VectorMetric.DOT_PRODUCT,
...     },
... }
>>> collection_definition_2 = CollectionDefinition.coerce(
...     collection_definition_2_dict
... )
>>>
>>> # The three created objects are exactly identical:
>>> collection_definition_2 == collection_definition_1
True
>>> collection_definition_2 == collection_definition
True
Expand source code
@dataclass
class CollectionDefinition:
    """
    A structure expressing the options of a collection.
    See the Data API specifications for detailed specification and allowed values.

    Instances of this object can be created in three ways: using a fluent interface,
    passing a fully-formed definition to the class constructor, or coercing an
    appropriately-shaped plain dictionary into this class.
    See the examples below and the Collection documentation for more details.

    Attributes:
        vector: an optional CollectionVectorOptions object.
        indexing: an optional dictionary with the "indexing" collection properties.
            This is in the form of a dictionary such as `{"deny": [...]}`
            or `{"allow": [...]}`, with a list of document paths, or alternatively
            just `["*"]`, to exclude from/include in collection indexing,
            respectively.
        default_id: an optional CollectionDefaultIDOptions object, describing
            the type of IDs generated for documents inserted without an
            explicit `_id` field.

    Example:
        >>> from astrapy.constants import VectorMetric
        >>> from astrapy.info import CollectionDefinition, CollectionVectorOptions
        >>>
        >>> # Create a collection definition with the fluent interface:
        >>> collection_definition = (
        ...     CollectionDefinition.builder()
        ...     .set_vector_dimension(3)
        ...     .set_vector_metric(VectorMetric.DOT_PRODUCT)
        ...     .set_indexing("deny", ["annotations", "logs"])
        ...     .build()
        ... )
        >>>
        >>> # Create a collection definition passing everything to the constructor:
        >>> collection_definition_1 = CollectionDefinition(
        ...     vector=CollectionVectorOptions(
        ...         dimension=3,
        ...         metric=VectorMetric.DOT_PRODUCT,
        ...     ),
        ...     indexing={"deny": ["annotations", "logs"]},
        ... )
        >>>
        >>> # Coerce a dictionary into a collection definition:
        >>> collection_definition_2_dict = {
        ...     "indexing": {"deny": ["annotations", "logs"]},
        ...     "vector": {
        ...         "dimension": 3,
        ...         "metric": VectorMetric.DOT_PRODUCT,
        ...     },
        ... }
        >>> collection_definition_2 = CollectionDefinition.coerce(
        ...     collection_definition_2_dict
        ... )
        >>>
        >>> # The three created objects are exactly identical:
        >>> collection_definition_2 == collection_definition_1
        True
        >>> collection_definition_2 == collection_definition
        True
    """

    vector: CollectionVectorOptions | None = None
    indexing: dict[str, Any] | None = None
    default_id: CollectionDefaultIDOptions | None = None

    def __repr__(self) -> str:
        # Only mention the attributes that are actually set, for a terse repr.
        not_null_pieces = [
            pc
            for pc in [
                None if self.vector is None else f"vector={self.vector.__repr__()}",
                (
                    None
                    if self.indexing is None
                    else f"indexing={self.indexing.__repr__()}"
                ),
                (
                    None
                    if self.default_id is None
                    else f"default_id={self.default_id.__repr__()}"
                ),
            ]
            if pc is not None
        ]
        return f"{self.__class__.__name__}({', '.join(not_null_pieces)})"

    def as_dict(self) -> dict[str, Any]:
        """Recast this object into a dictionary."""

        # Omit both unset (None) entries and empty-dict entries from the payload.
        return {
            k: v
            for k, v in {
                "vector": None if self.vector is None else self.vector.as_dict(),
                "indexing": self.indexing,
                "defaultId": (
                    None if self.default_id is None else self.default_id.as_dict()
                ),
            }.items()
            if v is not None and v != {}
        }

    @classmethod
    def _from_dict(cls, raw_dict: dict[str, Any]) -> CollectionDefinition:
        """
        Create an instance of CollectionDefinition from a dictionary
        such as one from the Data API.
        """

        _warn_residual_keys(cls, raw_dict, {"vector", "indexing", "defaultId"})
        return CollectionDefinition(
            vector=CollectionVectorOptions._from_dict(raw_dict.get("vector")),
            indexing=raw_dict.get("indexing"),
            default_id=CollectionDefaultIDOptions._from_dict(raw_dict.get("defaultId")),
        )

    @classmethod
    def coerce(
        cls, raw_input: CollectionDefinition | dict[str, Any]
    ) -> CollectionDefinition:
        """
        Normalize the input, whether an object already or a plain dictionary
        of the right structure, into a CollectionDefinition.
        """

        if isinstance(raw_input, CollectionDefinition):
            return raw_input
        else:
            return cls._from_dict(raw_input)

    @staticmethod
    def builder() -> CollectionDefinition:
        """
        Create an "empty" builder for constructing a collection definition through
        a fluent interface. The resulting object has no defined properties,
        traits that can be added progressively with the corresponding methods.

        See the class docstring for a full example on using the fluent interface.

        Returns:
            a CollectionDefinition for the simplest possible creatable collection.
        """

        return CollectionDefinition()

    def set_indexing(
        self, indexing_mode: str | None, indexing_target: list[str] | None = None
    ) -> CollectionDefinition:
        """
        Return a new collection definition object with a new indexing setting.
        The indexing can be set to something (fully overwriting any pre-existing
        configuration), or removed entirely. This method is for use within the
        fluent interface for progressively building a complete collection definition.

        See the class docstring for a full example on using the fluent interface.

        Args:
            indexing_mode: one of "allow" or "deny" to configure indexing, or None
                in case one wants to remove the setting.
            indexing_target: a list of the document paths covered by the allow/deny
                prescription. Passing this parameter when `indexing_mode` is None
                results in an error.

        Returns:
            a CollectionDefinition obtained by adding (or replacing) the desired
            indexing setting to this collection definition.
        """

        if indexing_mode is None:
            if indexing_target is not None:
                raise ValueError("Cannot pass an indexing target if unsetting indexing")
            return CollectionDefinition(
                vector=self.vector,
                indexing=None,
                default_id=self.default_id,
            )
        _i_mode = indexing_mode.lower()
        if _i_mode not in INDEXING_ALLOWED_MODES:
            msg = (
                f"Unknown indexing mode: '{indexing_mode}'. "
                f"Allowed values are: {', '.join(INDEXING_ALLOWED_MODES)}."
            )
            raise ValueError(msg)
        # Use the normalized (lowercased) mode - the one actually validated
        # above - and a concrete list (None becomes []) so the resulting
        # "indexing" dictionary is always well-formed for the Data API.
        _i_target: list[str] = indexing_target or []
        return CollectionDefinition(
            vector=self.vector,
            indexing={_i_mode: _i_target},
            default_id=self.default_id,
        )

    def set_default_id(self, default_id_type: str | None) -> CollectionDefinition:
        """
        Return a new collection definition object with a new setting for the
        collection 'default ID type'. This method is for use within the
        fluent interface for progressively building a complete collection definition.

        See the class docstring for a full example on using the fluent interface.

        Args:
            default_id_type: one of the values of `astrapy.constants.DefaultIdType`
                (or the equivalent string) to set a default ID type for a collection;
                alternatively, None to remove the corresponding configuration.

        Returns:
            a CollectionDefinition obtained by adding (or replacing) the desired
            default ID type setting to this collection definition.
        """

        if default_id_type is None:
            return CollectionDefinition(
                vector=self.vector,
                indexing=self.indexing,
                default_id=None,
            )

        return CollectionDefinition(
            vector=self.vector,
            indexing=self.indexing,
            default_id=CollectionDefaultIDOptions(
                default_id_type=default_id_type,
            ),
        )

    def set_vector_dimension(self, dimension: int | None) -> CollectionDefinition:
        """
        Return a new collection definition object with a new setting for the
        collection's vector dimension. This method is for use within the
        fluent interface for progressively building a complete collection definition.

        See the class docstring for a full example on using the fluent interface.

        Args:
            dimension: an integer, the number of components of vectors in the
                collection. Setting even just one vector-related property makes
                the described collection a "vector collection".
                Providing None removes this setting.

        Returns:
            a CollectionDefinition obtained by adding (or replacing) the desired
            vector-related setting to this collection definition.
        """

        # Start from the current vector options (or blank ones) and replace
        # only the dimension, leaving every other vector trait untouched.
        _vector_options = self.vector or CollectionVectorOptions()
        return CollectionDefinition(
            vector=CollectionVectorOptions(
                dimension=dimension,
                metric=_vector_options.metric,
                source_model=_vector_options.source_model,
                service=_vector_options.service,
            ),
            indexing=self.indexing,
            default_id=self.default_id,
        )

    def set_vector_metric(self, metric: str | None) -> CollectionDefinition:
        """
        Return a new collection definition object with a new setting for the
        collection's vector similarity metric. This method is for use within the
        fluent interface for progressively building a complete collection definition.

        See the class docstring for a full example on using the fluent interface.

        Args:
            metric: a value of those in `astrapy.constants.VectorMetric`, or an
                equivalent string such as "dot_product", used for vector search
                within the collection. Setting even just one vector-related property
                makes the described collection a "vector collection".
                Providing None removes this setting.

        Returns:
            a CollectionDefinition obtained by adding (or replacing) the desired
            vector-related setting to this collection definition.
        """

        # Start from the current vector options (or blank ones) and replace
        # only the metric, leaving every other vector trait untouched.
        _vector_options = self.vector or CollectionVectorOptions()
        return CollectionDefinition(
            vector=CollectionVectorOptions(
                dimension=_vector_options.dimension,
                metric=metric,
                source_model=_vector_options.source_model,
                service=_vector_options.service,
            ),
            indexing=self.indexing,
            default_id=self.default_id,
        )

    def set_vector_source_model(self, source_model: str | None) -> CollectionDefinition:
        """
        Return a new collection definition object with a new setting for the
        collection's vector 'source model' parameter. This method is for use within the
        fluent interface for progressively building a complete collection definition.

        See the class docstring for a full example on using the fluent interface.

        Args:
            source_model: an optional string setting for the vector index, to help
                it pick the set of parameters best suited to a specific embedding model.
                See the Data API documentation for more details.
                Setting even just one vector-related property makes the described
                collection a "vector collection". Providing None
                removes this setting - the Data API will use its defaults.

        Returns:
            a CollectionDefinition obtained by adding (or replacing) the desired
            vector-related setting to this collection definition.
        """

        # Start from the current vector options (or blank ones) and replace
        # only the source model, leaving every other vector trait untouched.
        _vector_options = self.vector or CollectionVectorOptions()
        return CollectionDefinition(
            vector=CollectionVectorOptions(
                dimension=_vector_options.dimension,
                metric=_vector_options.metric,
                source_model=source_model,
                service=_vector_options.service,
            ),
            indexing=self.indexing,
            default_id=self.default_id,
        )

    def set_vector_service(
        self,
        provider: str | VectorServiceOptions | None,
        model_name: str | None = None,
        *,
        authentication: dict[str, Any] | None = None,
        parameters: dict[str, Any] | None = None,
    ) -> CollectionDefinition:
        """
        Return a new collection definition object with a new setting for the
        collection's vectorize (i.e. server-side embeddings) service.
        This method is for use within the fluent interface for progressively
        building a complete collection definition.

        See the class docstring for a full example on using the fluent interface.

        Args:
            provider: this can be (1) a whole `VectorServiceOptions` object encoding
                all desired properties for a vectorize service; or (2) it can be None,
                to signify removal of the entire vectorize setting; alternatively,
                (3) it can be a string, the vectorize provider name as seen in the
                response from the database's `find_embedding_providers` method. In the
                latter case, the other parameters should also be provided as needed.
                See the examples below for an illustration of these usage patterns.
            model_name: a string, the name of the vectorize model to use (must be
                compatible with the chosen provider).
            authentication: a dictionary with the required authentication information
                if the vectorize makes use of secrets (API Keys) stored in the database
                Key Management System. See the Data API for more information on
                storing an API Key secret in one's Astra DB account.
            parameters: a free-form key-value mapping providing additional,
                model-dependent configuration settings. The allowed parameters for
                a given model are specified in the response of the Database
                `find_embedding_providers` method.

        Returns:
            a CollectionDefinition obtained by adding (or replacing) the desired
            vector-related setting to this collection definition.

        Example:
            >>> from astrapy.info import CollectionDefinition, VectorServiceOptions
            >>>
            >>> zero = CollectionDefinition.builder()
            >>>
            >>> svc1 = zero.set_vector_service(
            ...     "myProvider",
            ...     "myModelName",
            ...     parameters={"p": "z"},
            ... )
            >>> print(svc1.build().as_dict())
            {'vector': {'service': {'provider': 'myProvider', 'modelName': 'myModelName', 'parameters': {'p': 'z'}}}}
            >>>
            >>> myVecSvcOpt = VectorServiceOptions(
            ...     provider="myProvider",
            ...     model_name="myModelName",
            ...     parameters={"p": "z"},
            ... )
            >>> svc2 = zero.set_vector_service(myVecSvcOpt).build()
            >>> print(svc2.as_dict())
            {'vector': {'service': {'provider': 'myProvider', 'modelName': 'myModelName', 'parameters': {'p': 'z'}}}}
            >>>
            >>> reset = svc1.set_vector_service(None).build()
            >>> print(reset.as_dict())
            {}
        """

        _vector_options = self.vector or CollectionVectorOptions()
        if isinstance(provider, VectorServiceOptions):
            # Usage pattern (1): a fully-formed options object excludes the
            # piecewise parameters.
            if (
                model_name is not None
                or authentication is not None
                or parameters is not None
            ):
                msg = (
                    "Parameters 'model_name', 'authentication' and 'parameters' "
                    "cannot be passed when setting a VectorServiceOptions directly."
                )
                raise ValueError(msg)
            return CollectionDefinition(
                vector=CollectionVectorOptions(
                    dimension=_vector_options.dimension,
                    metric=_vector_options.metric,
                    source_model=_vector_options.source_model,
                    service=provider,
                ),
                indexing=self.indexing,
                default_id=self.default_id,
            )
        else:
            new_service: VectorServiceOptions | None
            if provider is None:
                # Usage pattern (2): unsetting the service excludes the
                # piecewise parameters as well.
                if (
                    model_name is not None
                    or authentication is not None
                    or parameters is not None
                ):
                    msg = (
                        "Parameters 'model_name', 'authentication' and 'parameters' "
                        "cannot be passed when unsetting the vector service."
                    )
                    raise ValueError(msg)
                new_service = None
            else:
                # Usage pattern (3): assemble the options from the parts.
                new_service = VectorServiceOptions(
                    provider=provider,
                    model_name=model_name,
                    authentication=authentication,
                    parameters=parameters,
                )
            return CollectionDefinition(
                vector=CollectionVectorOptions(
                    dimension=_vector_options.dimension,
                    metric=_vector_options.metric,
                    source_model=_vector_options.source_model,
                    service=new_service,
                ),
                indexing=self.indexing,
                default_id=self.default_id,
            )

    def build(self) -> CollectionDefinition:
        """
        The final step in the fluent (builder) interface. Calling this method
        finalizes the definition that has been built so far and makes it into a
        collection definition ready for use in e.g. collection creation.

        Note that this step may be automatically invoked by the receiving methods:
        however it is a good practice - and also adds to the readability of the code -
        to call it explicitly.

        See the class docstring for a full example on using the fluent interface.

        Returns:
            a CollectionDefinition obtained by finalizing the definition being
                built so far.
        """

        return self

Class variables

var default_id : CollectionDefaultIDOptions | None
var indexing : dict[str, typing.Any] | None
var vector : CollectionVectorOptions | None

Static methods

def builder() ‑> CollectionDefinition

Create an "empty" builder for constructing a collection definition through a fluent interface. The resulting object has no defined properties, traits that can be added progressively with the corresponding methods.

See the class docstring for a full example on using the fluent interface.

Returns

a CollectionDefinition for the simplest possible creatable collection.

Expand source code
@staticmethod
def builder() -> CollectionDefinition:
    """
    Create an "empty" builder to start constructing a collection definition
    via the fluent interface. The returned object carries no properties yet;
    traits are layered on progressively with the corresponding `set_*` methods.

    See the class docstring for a full example on using the fluent interface.

    Returns:
        a CollectionDefinition for the simplest possible creatable collection.
    """

    # A blank definition: every subsequent set_* call returns a new copy.
    return CollectionDefinition()
def coerce(raw_input: CollectionDefinition | dict[str, Any]) ‑> CollectionDefinition

Normalize the input, whether an object already or a plain dictionary of the right structure, into a CollectionDefinition.

Expand source code
@classmethod
def coerce(
    cls, raw_input: CollectionDefinition | dict[str, Any]
) -> CollectionDefinition:
    """
    Normalize the input into a CollectionDefinition, whether it is such an
    object already or a plain dictionary of the right structure.
    """

    # Instances pass through untouched; only dictionaries need parsing.
    if not isinstance(raw_input, CollectionDefinition):
        return cls._from_dict(raw_input)
    return raw_input

Methods

def as_dict(self) ‑> dict[str, typing.Any]

Recast this object into a dictionary.

Expand source code
def as_dict(self) -> dict[str, Any]:
    """Recast this object into a dictionary."""

    # Assemble the full payload first, then drop unset (None) entries
    # as well as empty-dict ones.
    raw_payload: dict[str, Any] = {
        "vector": self.vector.as_dict() if self.vector is not None else None,
        "indexing": self.indexing,
        "defaultId": (
            self.default_id.as_dict() if self.default_id is not None else None
        ),
    }
    return {k: v for k, v in raw_payload.items() if v is not None and v != {}}
def build(self) ‑> CollectionDefinition

The final step in the fluent (builder) interface. Calling this method finalizes the definition that has been built so far and makes it into a collection definition ready for use in e.g. collection creation.

Note that this step may be automatically invoked by the receiving methods: however it is a good practice - and also adds to the readability of the code - to call it explicitly.

See the class docstring for a full example on using the fluent interface.

Returns

a CollectionDefinition obtained by finalizing the definition being built so far.

Expand source code
def build(self) -> CollectionDefinition:
    """
    Finalize the fluent (builder) interface: the definition accumulated so
    far becomes a collection definition ready for use in e.g. collection
    creation.

    Note that this step may be automatically invoked by the receiving methods;
    calling it explicitly is nevertheless good practice and improves the
    readability of the code.

    See the class docstring for a full example on using the fluent interface.

    Returns:
        a CollectionDefinition obtained by finalizing the definition being
            built so far.
    """

    # Each set_* step already returned a complete immutable definition,
    # so finalizing requires no further work.
    return self
def set_default_id(self, default_id_type: str | None) ‑> CollectionDefinition

Return a new collection definition object with a new setting for the collection 'default ID type'. This method is for use within the fluent interface for progressively building a complete collection definition.

See the class docstring for a full example on using the fluent interface.

Args

default_id_type
one of the values of DefaultIdType (or the equivalent string) to set a default ID type for a collection; alternatively, None to remove the corresponding configuration.

Returns

a CollectionDefinition obtained by adding (or replacing) the desired default ID type setting to this collection definition.

Expand source code
def set_default_id(self, default_id_type: str | None) -> CollectionDefinition:
    """
    Return a new collection definition with a replaced (or removed) setting
    for the collection 'default ID type'. This method belongs to the fluent
    interface for progressively building a complete collection definition.

    See the class docstring for a full example on using the fluent interface.

    Args:
        default_id_type: one of the values of `astrapy.constants.DefaultIdType`
            (or the equivalent string) to set a default ID type for a collection;
            alternatively, None to remove the corresponding configuration.

    Returns:
        a CollectionDefinition obtained by adding (or replacing) the desired
        default ID type setting to this collection definition.
    """

    # None clears the setting; any other value wraps it in the options object.
    new_default_id = (
        None
        if default_id_type is None
        else CollectionDefaultIDOptions(default_id_type=default_id_type)
    )
    return CollectionDefinition(
        vector=self.vector,
        indexing=self.indexing,
        default_id=new_default_id,
    )
def set_indexing(self, indexing_mode: str | None, indexing_target: list[str] | None = None) ‑> CollectionDefinition

Return a new collection definition object with a new indexing setting. The indexing can be set to something (fully overwriting any pre-existing configuration), or removed entirely. This method is for use within the fluent interface for progressively building a complete collection definition.

See the class docstring for a full example on using the fluent interface.

Args

indexing_mode
one of "allow" or "deny" to configure indexing, or None in case one wants to remove the setting.
indexing_target
a list of the document paths covered by the allow/deny prescription. Passing this parameter when indexing_mode is None results in an error.

Returns

a CollectionDefinition obtained by adding (or replacing) the desired indexing setting to this collection definition.

Expand source code
def set_indexing(
    self, indexing_mode: str | None, indexing_target: list[str] | None = None
) -> CollectionDefinition:
    """
    Return a new collection definition object with a new indexing setting.
    The indexing can be set to something (fully overwriting any pre-existing
    configuration), or removed entirely. This method is for use within the
    fluent interface for progressively building a complete collection definition.

    See the class docstring for a full example on using the fluent interface.

    Args:
        indexing_mode: one of "allow" or "deny" to configure indexing, or None
            in case one wants to remove the setting.
        indexing_target: a list of the document paths covered by the allow/deny
            prescription. Passing this parameter when `indexing_mode` is None
            results in an error.

    Returns:
        a CollectionDefinition obtained by adding (or replacing) the desired
        indexing setting to this collection definition.
    """

    if indexing_mode is None:
        if indexing_target is not None:
            raise ValueError("Cannot pass an indexing target if unsetting indexing")
        return CollectionDefinition(
            vector=self.vector,
            indexing=None,
            default_id=self.default_id,
        )
    _i_mode = indexing_mode.lower()
    if _i_mode not in INDEXING_ALLOWED_MODES:
        msg = (
            f"Unknown indexing mode: '{indexing_mode}'. "
            f"Allowed values are: {', '.join(INDEXING_ALLOWED_MODES)}."
        )
        raise ValueError(msg)
    # Use the normalized (lowercased) mode - the one actually validated
    # above - and a concrete list (None becomes []) so the resulting
    # "indexing" dictionary is always well-formed for the Data API.
    _i_target: list[str] = indexing_target or []
    return CollectionDefinition(
        vector=self.vector,
        indexing={_i_mode: _i_target},
        default_id=self.default_id,
    )
def set_vector_dimension(self, dimension: int | None) ‑> CollectionDefinition

Return a new collection definition object with a new setting for the collection's vector dimension. This method is for use within the fluent interface for progressively building a complete collection definition.

See the class docstring for a full example on using the fluent interface.

Args

dimension
an integer, the number of components of vectors in the collection. Setting even just one vector-related property makes the described collection a "vector collection". Providing None removes this setting.

Returns

a CollectionDefinition obtained by adding (or replacing) the desired vector-related setting to this collection definition.

Expand source code
def set_vector_dimension(self, dimension: int | None) -> CollectionDefinition:
    """
    Return a new collection definition with a replaced setting for the
    collection's vector dimension. This method belongs to the fluent
    interface for progressively building a complete collection definition.

    See the class docstring for a full example on using the fluent interface.

    Args:
        dimension: an integer, the number of components of vectors in the
            collection. Setting even just one vector-related property makes
            the described collection a "vector collection".
            Providing None removes this setting.

    Returns:
        a CollectionDefinition obtained by adding (or replacing) the desired
        vector-related setting to this collection definition.
    """

    # Keep every other vector trait as-is; only the dimension changes.
    prior = self.vector or CollectionVectorOptions()
    new_vector = CollectionVectorOptions(
        dimension=dimension,
        metric=prior.metric,
        source_model=prior.source_model,
        service=prior.service,
    )
    return CollectionDefinition(
        vector=new_vector,
        indexing=self.indexing,
        default_id=self.default_id,
    )
def set_vector_metric(self, metric: str | None) ‑> CollectionDefinition

Return a new collection definition object with a new setting for the collection's vector similarity metric. This method is for use within the fluent interface for progressively building a complete collection definition.

See the class docstring for a full example on using the fluent interface.

Args

metric
a value of those in VectorMetric, or an equivalent string such as "dot_product", used for vector search within the collection. Setting even just one vector-related property makes the described collection a "vector collection". Providing None removes this setting.

Returns

a CollectionDefinition obtained by adding (or replacing) the desired vector-related setting to this collection definition.

Expand source code
def set_vector_metric(self, metric: str | None) -> CollectionDefinition:
    """
    Produce a copy of this collection definition in which the vector
    similarity metric is set (or cleared) to the given value. This method
    is part of the fluent interface for progressively building a complete
    collection definition.

    See the class docstring for a full example on using the fluent interface.

    Args:
        metric: a value of those in `astrapy.constants.VectorMetric`, or an
            equivalent string such as "dot_product", used for vector search
            within the collection. Setting even just one vector-related property
            makes the described collection a "vector collection".
            Providing None removes this setting.

    Returns:
        a CollectionDefinition obtained by adding (or replacing) the desired
        vector-related setting to this collection definition.
    """

    # Start from the current vector options (or a blank set of options),
    # replacing only the metric; all other settings carry over unchanged.
    current = self.vector if self.vector is not None else CollectionVectorOptions()
    updated_vector = CollectionVectorOptions(
        dimension=current.dimension,
        metric=metric,
        source_model=current.source_model,
        service=current.service,
    )
    return CollectionDefinition(
        vector=updated_vector,
        indexing=self.indexing,
        default_id=self.default_id,
    )
def set_vector_service(self, provider: str | VectorServiceOptions | None, model_name: str | None = None, *, authentication: dict[str, Any] | None = None, parameters: dict[str, Any] | None = None) ‑> CollectionDefinition

Return a new collection definition object with a new setting for the collection's vectorize (i.e. server-side embeddings) service. This method is for use within the fluent interface for progressively building a complete collection definition.

See the class docstring for a full example on using the fluent interface.

Args

provider
this can be (1) a whole VectorServiceOptions object encoding all desired properties for a vectorize service; or (2) it can be None, to signify removal of the entire vectorize setting; alternatively, (3) it can be a string, the vectorize provider name as seen in the response from the database's find_embedding_providers method. In the latter case, the other parameters should also be provided as needed. See the examples below for an illustration of these usage patterns.
model_name
a string, the name of the vectorize model to use (must be compatible with the chosen provider).
authentication
a dictionary with the required authentication information if the vectorize makes use of secrets (API Keys) stored in the database Key Management System. See the Data API for more information on storing an API Key secret in one's Astra DB account.
parameters
a free-form key-value mapping providing additional, model-dependent configuration settings. The allowed parameters for a given model are specified in the response of the Database find_embedding_providers method.

Returns

a CollectionDefinition obtained by adding (or replacing) the desired vector-related setting to this collection definition.

Example

>>> from astrapy.info import CollectionDefinition, VectorServiceOptions
>>>
>>> zero = CollectionDefinition.builder()
>>>
>>> svc1 = zero.set_vector_service(
...     "myProvider",
...     "myModelName",
...     parameters={"p": "z"},
... )
>>> print(svc1.build().as_dict())
{'vector': {'service': {'provider': 'myProvider', 'modelName': 'myModelName', 'parameters': {'p': 'z'}}}}
>>>
>>> myVecSvcOpt = VectorServiceOptions(
...     provider="myProvider",
...     model_name="myModelName",
...     parameters={"p": "z"},
... )
>>> svc2 = zero.set_vector_service(myVecSvcOpt).build()
>>> print(svc2.as_dict())
{'vector': {'service': {'provider': 'myProvider', 'modelName': 'myModelName', 'parameters': {'p': 'z'}}}}
>>>
>>> reset = svc1.set_vector_service(None).build()
>>> print(reset.as_dict())
{}
Expand source code
def set_vector_service(
    self,
    provider: str | VectorServiceOptions | None,
    model_name: str | None = None,
    *,
    authentication: dict[str, Any] | None = None,
    parameters: dict[str, Any] | None = None,
) -> CollectionDefinition:
    """
    Produce a copy of this collection definition in which the vectorize
    (i.e. server-side embeddings) service is set, replaced or removed.
    This method is part of the fluent interface for progressively
    building a complete collection definition.

    See the class docstring for a full example on using the fluent interface.

    Args:
        provider: this can be (1) a whole `VectorServiceOptions` object encoding
            all desired properties for a vectorize service; or (2) it can be None,
            to signify removal of the entire vectorize setting; alternatively,
            (3) it can be a string, the vectorize provider name as seen in the
            response from the database's `find_embedding_providers` method. In the
            latter case, the other parameters should also be provided as needed.
            See the examples below for an illustration of these usage patterns.
        model_name: a string, the name of the vectorize model to use (must be
            compatible with the chosen provider).
        authentication: a dictionary with the required authentication information
            if the vectorize makes use of secrets (API Keys) stored in the database
            Key Management System. See the Data API for more information on
            storing an API Key secret in one's Astra DB account.
        parameters: a free-form key-value mapping providing additional,
            model-dependent configuration settings. The allowed parameters for
            a given model are specified in the response of the Database
            `find_embedding_providers` method.

    Returns:
        a CollectionDefinition obtained by adding (or replacing) the desired
        vector-related setting to this collection definition.

    Example:
        >>> from astrapy.info import CollectionDefinition, VectorServiceOptions
        >>>
        >>> zero = CollectionDefinition.builder()
        >>>
        >>> svc1 = zero.set_vector_service(
        ...     "myProvider",
        ...     "myModelName",
        ...     parameters={"p": "z"},
        ... )
        >>> print(svc1.build().as_dict())
        {'vector': {'service': {'provider': 'myProvider', 'modelName': 'myModelName', 'parameters': {'p': 'z'}}}}
        >>>
        >>> myVecSvcOpt = VectorServiceOptions(
        ...     provider="myProvider",
        ...     model_name="myModelName",
        ...     parameters={"p": "z"},
        ... )
        >>> svc2 = zero.set_vector_service(myVecSvcOpt).build()
        >>> print(svc2.as_dict())
        {'vector': {'service': {'provider': 'myProvider', 'modelName': 'myModelName', 'parameters': {'p': 'z'}}}}
        >>>
        >>> reset = svc1.set_vector_service(None).build()
        >>> print(reset.as_dict())
        {}
    """

    # The extra keyword parameters are only meaningful when `provider` is a
    # plain provider-name string; flag their presence once for both checks.
    extras_given = (
        model_name is not None
        or authentication is not None
        or parameters is not None
    )
    new_service: VectorServiceOptions | None
    if isinstance(provider, VectorServiceOptions):
        # Usage pattern (1): a fully-formed options object is adopted as-is.
        if extras_given:
            raise ValueError(
                "Parameters 'model_name', 'authentication' and 'parameters' "
                "cannot be passed when setting a VectorServiceOptions directly."
            )
        new_service = provider
    elif provider is None:
        # Usage pattern (2): removal of the whole vectorize setting.
        if extras_given:
            raise ValueError(
                "Parameters 'model_name', 'authentication' and 'parameters' "
                "cannot be passed when unsetting the vector service."
            )
        new_service = None
    else:
        # Usage pattern (3): assemble the options from the individual pieces.
        new_service = VectorServiceOptions(
            provider=provider,
            model_name=model_name,
            authentication=authentication,
            parameters=parameters,
        )
    current = self.vector if self.vector is not None else CollectionVectorOptions()
    return CollectionDefinition(
        vector=CollectionVectorOptions(
            dimension=current.dimension,
            metric=current.metric,
            source_model=current.source_model,
            service=new_service,
        ),
        indexing=self.indexing,
        default_id=self.default_id,
    )
def set_vector_source_model(self, source_model: str | None) ‑> CollectionDefinition

Return a new collection definition object with a new setting for the collection's vector 'source model' parameter. This method is for use within the fluent interface for progressively building a complete collection definition.

See the class docstring for a full example on using the fluent interface.

Args

source_model
an optional string setting for the vector index, to help it pick the set of parameters best suited to a specific embedding model. See the Data API documentation for more details. Setting even just one vector-related property makes the described collection a "vector collection". Providing None removes this setting - the Data API will use its defaults.

Returns

a CollectionDefinition obtained by adding (or replacing) the desired vector-related setting to this collection definition.

Expand source code
def set_vector_source_model(self, source_model: str | None) -> CollectionDefinition:
    """
    Produce a copy of this collection definition in which the vector
    'source model' parameter is set (or cleared) to the given value. This
    method is part of the fluent interface for progressively building a
    complete collection definition.

    See the class docstring for a full example on using the fluent interface.

    Args:
        source_model: an optional string setting for the vector index, to help
            it pick the set of parameters best suited to a specific embedding model.
            See the Data API documentation for more details.
            Setting even just one vector-related property makes the described
            collection a "vector collection". Providing None
            removes this setting - the Data API will use its defaults.

    Returns:
        a CollectionDefinition obtained by adding (or replacing) the desired
        vector-related setting to this collection definition.
    """

    # Start from the current vector options (or a blank set of options),
    # replacing only the source model; all other settings carry over unchanged.
    current = self.vector if self.vector is not None else CollectionVectorOptions()
    updated_vector = CollectionVectorOptions(
        dimension=current.dimension,
        metric=current.metric,
        source_model=source_model,
        service=current.service,
    )
    return CollectionDefinition(
        vector=updated_vector,
        indexing=self.indexing,
        default_id=self.default_id,
    )
class CollectionDescriptor (name: str, definition: CollectionDefinition, raw_descriptor: dict[str, Any] | None)

A structure expressing full description of a collection as the Data API returns it, i.e. its name and its definition.

Attributes

name
the name of the collection.
definition
a CollectionDefinition instance.
raw_descriptor
the raw response from the Data API.

Note

although the API format has the collection settings in a field called "options" (both in payloads and in responses, consistently), the corresponding attribute of this object is called definition to keep consistency with the TableDescriptor class and the attribute's data type (CollectionDefinition). As a consequence, when coercing a plain dictionary into this class, care must be taken that the plain dictionary key be "options", as could a response from the API have it.

Expand source code
@dataclass
class CollectionDescriptor:
    """
    Full description of a collection as the Data API returns it:
    the collection name together with its definition.

    Attributes:
        name: the name of the collection.
        definition: a CollectionDefinition instance.
        raw_descriptor: the raw response from the Data API.

    Note:
        although the API format has the collection settings in a field called
        "options" (both in payloads and in responses, consistently), the corresponding
        attribute of this object is called `definition` to keep consistency with the
        TableDescriptor class and the attribute's data type (`CollectionDefinition`).
        As a consequence, when coercing a plain dictionary into this class, care must
        be taken that the plain dictionary key be "options", as could a response from
        the API have it.
    """

    name: str
    definition: CollectionDefinition
    raw_descriptor: dict[str, Any] | None

    def __repr__(self) -> str:
        # The raw descriptor is elided (it can be large), and omitted
        # entirely from the repr when absent.
        pieces = [f"name={self.name!r}", f"definition={self.definition!r}"]
        if self.raw_descriptor is not None:
            pieces.append("raw_descriptor=...")
        return f"{self.__class__.__name__}({', '.join(pieces)})"

    def __eq__(self, other: Any) -> bool:
        # Equality disregards the raw descriptor: name + definition suffice.
        return (
            isinstance(other, CollectionDescriptor)
            and self.name == other.name
            and self.definition == other.definition
        )

    def as_dict(self) -> dict[str, Any]:
        """
        Recast this object into a dictionary.
        Empty `definition` will not be returned at all.
        """

        full_dict = {
            "name": self.name,
            "options": self.definition.as_dict(),
        }
        return {k: v for k, v in full_dict.items() if v}

    @classmethod
    def _from_dict(cls, raw_dict: dict[str, Any]) -> CollectionDescriptor:
        """
        Create an instance of CollectionDescriptor from a dictionary
        such as one from the Data API.
        """

        _warn_residual_keys(cls, raw_dict, {"name", "options"})
        return CollectionDescriptor(
            name=raw_dict["name"],
            definition=CollectionDefinition._from_dict(raw_dict.get("options") or {}),
            raw_descriptor=raw_dict,
        )

    @classmethod
    def coerce(
        cls, raw_input: CollectionDescriptor | dict[str, Any]
    ) -> CollectionDescriptor:
        """
        Normalize the input, whether an object already or a plain dictionary
        of the right structure, into a CollectionDescriptor.
        """

        if isinstance(raw_input, CollectionDescriptor):
            return raw_input
        return cls._from_dict(raw_input)

Class variables

var definition : CollectionDefinition
var name : str
var raw_descriptor : dict[str, typing.Any] | None

Static methods

def coerce(raw_input: CollectionDescriptor | dict[str, Any]) ‑> CollectionDescriptor

Normalize the input, whether an object already or a plain dictionary of the right structure, into a CollectionDescriptor.

Expand source code
@classmethod
def coerce(
    cls, raw_input: CollectionDescriptor | dict[str, Any]
) -> CollectionDescriptor:
    """
    Normalize the input into a CollectionDescriptor: an instance is passed
    through unchanged, while a plain dictionary of the right structure is
    parsed into a new object.
    """

    return (
        raw_input
        if isinstance(raw_input, CollectionDescriptor)
        else cls._from_dict(raw_input)
    )

Methods

def as_dict(self) ‑> dict[str, typing.Any]

Recast this object into a dictionary. Empty definition will not be returned at all.

Expand source code
def as_dict(self) -> dict[str, Any]:
    """
    Recast this object into a dictionary.
    Empty `definition` will not be returned at all.
    """

    # Build the full mapping first, then drop falsy values (an empty
    # options dictionary is thereby omitted from the result).
    full_dict = {
        "name": self.name,
        "options": self.definition.as_dict(),
    }
    return {k: v for k, v in full_dict.items() if v}
class CollectionInfo (database_info: AstraDBDatabaseInfo, keyspace: str, name: str, full_name: str)

Represents the identifying information for a collection, including the information about the database the collection belongs to.

Attributes

database_info
an AstraDBDatabaseInfo instance for the underlying database.
keyspace
the keyspace where the collection is located.
name
collection name. Unique within a keyspace (across tables/collections).
full_name
identifier for the collection within the database, in the form "keyspace.collection_name".
Expand source code
@dataclass
class CollectionInfo:
    """
    Represents the identifying information for a collection,
    including the information about the database the collection belongs to.

    Attributes:
        database_info: an AstraDBDatabaseInfo instance for the underlying database.
        keyspace: the keyspace where the collection is located.
        name: collection name. Unique within a keyspace (across tables/collections).
        full_name: identifier for the collection within the database,
            in the form "keyspace.collection_name".
    """

    # Pure data container: the dataclass decorator supplies
    # __init__/__eq__/__repr__; no extra behavior is defined.
    database_info: AstraDBDatabaseInfo
    keyspace: str
    name: str
    full_name: str

Class variables

var database_info : AstraDBDatabaseInfo
var full_name : str
var keyspace : str
var name : str
class CollectionVectorOptions (dimension: int | None = None, metric: str | None = None, source_model: str | None = None, service: VectorServiceOptions | None = None)

The "vector" component of the collection options. See the Data API specifications for allowed values.

Attributes

dimension
an optional positive integer, the dimensionality of the vector space (i.e. the number of components in each vector).
metric
an optional choice of similarity metric to use in vector search. It must be a (string) value among VectorMetric.DOT_PRODUCT, VectorMetric.EUCLIDEAN and VectorMetric.COSINE.
source_model
based on this value, the vector index can tune itself so as to achieve optimal performance for a given embedding model. See the Data API documentation for the allowed values. Defaults to "other".
service
an optional VectorServiceOptions object in case a vectorize service is configured to achieve server-side embedding computation on the collection.
Expand source code
@dataclass
class CollectionVectorOptions:
    """
    The "vector" component of the collection options.
    See the Data API specifications for allowed values.

    Attributes:
        dimension: an optional positive integer, the dimensionality
            of the vector space (i.e. the number of components in each vector).
        metric: an optional choice of similarity metric to use in vector search.
            It must be a (string) value among `VectorMetric.DOT_PRODUCT`,
            `VectorMetric.EUCLIDEAN` and `VectorMetric.COSINE`.
        source_model: based on this value, the vector index can tune itself so as
            to achieve optimal performance for a given embedding model. See the
            Data API documentation for the allowed values. Defaults to "other".
        service: an optional VectorServiceOptions object in case a vectorize
            service is configured to achieve server-side embedding computation
            on the collection.
    """

    dimension: int | None = None
    metric: str | None = None
    source_model: str | None = None
    service: VectorServiceOptions | None = None

    def as_dict(self) -> dict[str, Any]:
        """Recast this object into a dictionary, omitting unset (None) fields."""

        # The trailing `if v is not None` filter drops unset entries, so the
        # scalar fields can be used directly; only `service` needs special
        # handling (it must itself be recast into a dictionary when present).
        return {
            k: v
            for k, v in {
                "dimension": self.dimension,
                "metric": self.metric,
                "service": None if self.service is None else self.service.as_dict(),
                "sourceModel": self.source_model,
            }.items()
            if v is not None
        }

    @staticmethod
    def _from_dict(raw_dict: dict[str, Any] | None) -> CollectionVectorOptions | None:
        """
        Create an instance of CollectionVectorOptions from a dictionary
        such as one from the Data API. A None input yields None.
        """

        if raw_dict is None:
            return None
        return CollectionVectorOptions(
            dimension=raw_dict.get("dimension"),
            metric=raw_dict.get("metric"),
            source_model=raw_dict.get("sourceModel"),
            service=VectorServiceOptions._from_dict(raw_dict.get("service")),
        )

Class variables

var dimension : int | None
var metric : str | None
var service : VectorServiceOptions | None
var source_model : str | None

Methods

def as_dict(self) ‑> dict[str, typing.Any]

Recast this object into a dictionary.

Expand source code
def as_dict(self) -> dict[str, Any]:
    """Recast this object into a dictionary, omitting unset (None) fields."""

    # The trailing `if v is not None` filter drops unset entries, so the
    # scalar fields can be used directly; only `service` needs special
    # handling (it must itself be recast into a dictionary when present).
    return {
        k: v
        for k, v in {
            "dimension": self.dimension,
            "metric": self.metric,
            "service": None if self.service is None else self.service.as_dict(),
            "sourceModel": self.source_model,
        }.items()
        if v is not None
    }
class ColumnType (*args, **kwds)

Enum to describe the scalar column types for Tables.

A 'scalar' type is a non-composite type: that means, no sets, lists, maps and other non-primitive data types.

Expand source code
class ColumnType(StrEnum):
    """
    Enum to describe the scalar column types for Tables.

    A 'scalar' type is a non-composite type: that means, no sets, lists, maps
    and other non-primitive data types.
    """

    # As a StrEnum, each member compares equal to its lowercase string value
    # (e.g. ColumnType.TEXT == "text"); these values match the "type" strings
    # accepted in plain-dictionary table definitions.
    ASCII = "ascii"
    BIGINT = "bigint"
    BLOB = "blob"
    BOOLEAN = "boolean"
    DATE = "date"
    DECIMAL = "decimal"
    DOUBLE = "double"
    DURATION = "duration"
    FLOAT = "float"
    INET = "inet"
    INT = "int"
    SMALLINT = "smallint"
    TEXT = "text"
    TIME = "time"
    TIMESTAMP = "timestamp"
    TINYINT = "tinyint"
    UUID = "uuid"
    VARINT = "varint"

Ancestors

Class variables

var ASCII
var BIGINT
var BLOB
var BOOLEAN
var DATE
var DECIMAL
var DOUBLE
var DURATION
var FLOAT
var INET
var INT
var SMALLINT
var TEXT
var TIME
var TIMESTAMP
var TINYINT
var UUID
var VARINT

Inherited members

class CreateTableDefinition (columns: dict[str, TableColumnTypeDescriptor], primary_key: TablePrimaryKeyDescriptor)

A structure expressing the definition ("schema") of a table to be created through the Data API. This object is passed as the definition parameter to the database create_table method.

See the Data API specifications for detailed specification and allowed values.

Instances of this object can be created in three ways: using a fluent interface, passing a fully-formed definition to the class constructor, or coercing an appropriately-shaped plain dictionary into this class.

Attributes

columns
a map from column names to their type definition object.
primary_key
a specification of the primary key for the table.

Example

>>> from astrapy.constants import SortMode
>>> from astrapy.info import (
...     CreateTableDefinition,
...     TablePrimaryKeyDescriptor,
...     ColumnType,
...     TableScalarColumnTypeDescriptor,
...     TableValuedColumnType,
...     TableValuedColumnTypeDescriptor,
...     TableVectorColumnTypeDescriptor,
... )
>>>
>>> # Create a table definition with the fluent interface:
>>> table_definition = (
...     CreateTableDefinition.builder()
...     .add_column("match_id", ColumnType.TEXT)
...     .add_column("round", ColumnType.INT)
...     .add_vector_column("m_vector", dimension=3)
...     .add_column("score", ColumnType.INT)
...     .add_column("when", ColumnType.TIMESTAMP)
...     .add_column("winner", ColumnType.TEXT)
...     .add_set_column("fighters", ColumnType.UUID)
...     .add_partition_by(["match_id"])
...     .add_partition_sort({"round": SortMode.ASCENDING})
...     .build()
... )
>>>
>>> # Create a table definition passing everything to the constructor:
>>> table_definition_1 = CreateTableDefinition(
...     columns={
...         "match_id": TableScalarColumnTypeDescriptor(
...             ColumnType.TEXT,
...         ),
...         "round": TableScalarColumnTypeDescriptor(
...             ColumnType.INT,
...         ),
...         "m_vector": TableVectorColumnTypeDescriptor(
...             column_type="vector", dimension=3
...         ),
...         "score": TableScalarColumnTypeDescriptor(
...             ColumnType.INT,
...         ),
...         "when": TableScalarColumnTypeDescriptor(
...             ColumnType.TIMESTAMP,
...         ),
...         "winner": TableScalarColumnTypeDescriptor(
...             ColumnType.TEXT,
...         ),
...         "fighters": TableValuedColumnTypeDescriptor(
...             column_type=TableValuedColumnType.SET,
...             value_type=ColumnType.UUID,
...         ),
...     },
...     primary_key=TablePrimaryKeyDescriptor(
...         partition_by=["match_id"],
...         partition_sort={"round": SortMode.ASCENDING},
...     ),
... )
>>>
>>> # Coerce a dictionary into a table definition:
>>> table_definition_2_dict = {
...     "columns": {
...         "match_id": {"type": "text"},
...         "round": {"type": "int"},
...         "m_vector": {"type": "vector", "dimension": 3},
...         "score": {"type": "int"},
...         "when": {"type": "timestamp"},
...         "winner": {"type": "text"},
...         "fighters": {"type": "set", "valueType": "uuid"},
...     },
...     "primaryKey": {
...         "partitionBy": ["match_id"],
...         "partitionSort": {"round": 1},
...     },
... }
>>> table_definition_2 = CreateTableDefinition.coerce(
...     table_definition_2_dict
... )
>>>
>>> # The three created objects are exactly identical:
>>> table_definition_2 == table_definition_1
True
>>> table_definition_2 == table_definition
True
Expand source code
@dataclass
class CreateTableDefinition:
    """
    A structure expressing the definition ("schema") of a table to be created through
    the Data API. This object is passed as the `definition` parameter to the database
    `create_table` method.

    See the Data API specifications for detailed specification and allowed values.

    Instances of this object can be created in three ways: using a fluent interface,
    passing a fully-formed definition to the class constructor, or coercing an
    appropriately-shaped plain dictionary into this class.

    Attributes:
        columns: a map from column names to their type definition object.
        primary_key: a specification of the primary key for the table.

    Example:
        >>> from astrapy.constants import SortMode
        >>> from astrapy.info import (
        ...     CreateTableDefinition,
        ...     TablePrimaryKeyDescriptor,
        ...     ColumnType,
        ...     TableScalarColumnTypeDescriptor,
        ...     TableValuedColumnType,
        ...     TableValuedColumnTypeDescriptor,
        ...     TableVectorColumnTypeDescriptor,
        ... )
        >>>
        >>> # Create a table definition with the fluent interface:
        >>> table_definition = (
        ...     CreateTableDefinition.builder()
        ...     .add_column("match_id", ColumnType.TEXT)
        ...     .add_column("round", ColumnType.INT)
        ...     .add_vector_column("m_vector", dimension=3)
        ...     .add_column("score", ColumnType.INT)
        ...     .add_column("when", ColumnType.TIMESTAMP)
        ...     .add_column("winner", ColumnType.TEXT)
        ...     .add_set_column("fighters", ColumnType.UUID)
        ...     .add_partition_by(["match_id"])
        ...     .add_partition_sort({"round": SortMode.ASCENDING})
        ...     .build()
        ... )
        >>>
        >>> # Create a table definition passing everything to the constructor:
        >>> table_definition_1 = CreateTableDefinition(
        ...     columns={
        ...         "match_id": TableScalarColumnTypeDescriptor(
        ...             ColumnType.TEXT,
        ...         ),
        ...         "round": TableScalarColumnTypeDescriptor(
        ...             ColumnType.INT,
        ...         ),
        ...         "m_vector": TableVectorColumnTypeDescriptor(
        ...             column_type="vector", dimension=3
        ...         ),
        ...         "score": TableScalarColumnTypeDescriptor(
        ...             ColumnType.INT,
        ...         ),
        ...         "when": TableScalarColumnTypeDescriptor(
        ...             ColumnType.TIMESTAMP,
        ...         ),
        ...         "winner": TableScalarColumnTypeDescriptor(
        ...             ColumnType.TEXT,
        ...         ),
        ...         "fighters": TableValuedColumnTypeDescriptor(
        ...             column_type=TableValuedColumnType.SET,
        ...             value_type=ColumnType.UUID,
        ...         ),
        ...     },
        ...     primary_key=TablePrimaryKeyDescriptor(
        ...         partition_by=["match_id"],
        ...         partition_sort={"round": SortMode.ASCENDING},
        ...     ),
        ... )
        >>>
        >>> # Coerce a dictionary into a table definition:
        >>> table_definition_2_dict = {
        ...     "columns": {
        ...         "match_id": {"type": "text"},
        ...         "round": {"type": "int"},
        ...         "m_vector": {"type": "vector", "dimension": 3},
        ...         "score": {"type": "int"},
        ...         "when": {"type": "timestamp"},
        ...         "winner": {"type": "text"},
        ...         "fighters": {"type": "set", "valueType": "uuid"},
        ...     },
        ...     "primaryKey": {
        ...         "partitionBy": ["match_id"],
        ...         "partitionSort": {"round": 1},
        ...     },
        ... }
        >>> table_definition_2 = CreateTableDefinition.coerce(
        ...     table_definition_2_dict
        ... )
        >>>
        >>> # The three created objects are exactly identical:
        >>> table_definition_2 == table_definition_1
        True
        >>> table_definition_2 == table_definition
        True
    """

    columns: dict[str, TableColumnTypeDescriptor]
    primary_key: TablePrimaryKeyDescriptor

    def __repr__(self) -> str:
        # Both pieces are f-strings and can never be None; the previous
        # None-filtering step was dead code and has been removed.
        pieces = [
            f"columns=[{','.join(self.columns.keys())}]",
            f"primary_key={self.primary_key}",
        ]
        return f"{self.__class__.__name__}({', '.join(pieces)})"

    def as_dict(self) -> dict[str, Any]:
        """Recast this object into a dictionary."""

        return {
            k: v
            for k, v in {
                "columns": {
                    col_n: col_v.as_dict() for col_n, col_v in self.columns.items()
                },
                "primaryKey": self.primary_key.as_dict(),
            }.items()
            if v is not None
        }

    @classmethod
    def _from_dict(cls, raw_dict: dict[str, Any]) -> CreateTableDefinition:
        """
        Create an instance of CreateTableDefinition from a dictionary
        such as one from the Data API.
        """

        _warn_residual_keys(cls, raw_dict, {"columns", "primaryKey"})
        return CreateTableDefinition(
            columns={
                col_n: TableColumnTypeDescriptor.coerce(col_v)
                for col_n, col_v in raw_dict["columns"].items()
            },
            primary_key=TablePrimaryKeyDescriptor.coerce(raw_dict["primaryKey"]),
        )

    @classmethod
    def coerce(
        cls, raw_input: CreateTableDefinition | dict[str, Any]
    ) -> CreateTableDefinition:
        """
        Normalize the input, whether an object already or a plain dictionary
        of the right structure, into a CreateTableDefinition.
        """

        if isinstance(raw_input, CreateTableDefinition):
            return raw_input
        else:
            return cls._from_dict(raw_input)

    @staticmethod
    def builder() -> CreateTableDefinition:
        """
        Create an "empty" builder for constructing a table definition through
        a fluent interface. The resulting object has no columns and no primary key,
        traits that are to be added progressively with the corresponding methods.

        Since it describes a "table with no columns at all", the result of
        this method alone is not an acceptable table definition for running a table
        creation method on a Database.

        See the class docstring for a full example on using the fluent interface.

        Returns:
            a CreateTableDefinition formally describing a table with no columns.
        """

        return CreateTableDefinition(
            columns={},
            primary_key=TablePrimaryKeyDescriptor(
                partition_by=[],
                partition_sort={},
            ),
        )

    def add_scalar_column(
        self, column_name: str, column_type: str | ColumnType
    ) -> CreateTableDefinition:
        """
        Return a new table definition object with an added column
        of a scalar type (i.e. not a list, set or other composite type).
        This method is for use within the fluent interface for progressively
        building a complete table definition.

        See the class docstring for a full example on using the fluent interface.

        Args:
            column_name: the name of the new column to add to the definition.
            column_type: a string, or a `ColumnType` value, defining
                the scalar type for the column.

        Returns:
            a CreateTableDefinition obtained by adding (or replacing) the desired
            column to this table definition.
        """

        return CreateTableDefinition(
            columns={
                **self.columns,
                **{
                    column_name: TableScalarColumnTypeDescriptor(
                        column_type=ColumnType.coerce(column_type)
                    )
                },
            },
            primary_key=self.primary_key,
        )

    def add_column(
        self, column_name: str, column_type: str | ColumnType
    ) -> CreateTableDefinition:
        """
        Return a new table definition object with an added column
        of a scalar type (i.e. not a list, set or other composite type).
        This method is for use within the fluent interface for progressively
        building a complete table definition.

        This method is an alias for `add_scalar_column`.

        See the class docstring for a full example on using the fluent interface.

        Args:
            column_name: the name of the new column to add to the definition.
            column_type: a string, or a `ColumnType` value, defining
                the scalar type for the column.

        Returns:
            a CreateTableDefinition obtained by adding (or replacing) the desired
            column to this table definition.
        """

        return self.add_scalar_column(column_name=column_name, column_type=column_type)

    def add_set_column(
        self, column_name: str, value_type: str | ColumnType
    ) -> CreateTableDefinition:
        """
        Return a new table definition object with an added column
        of 'set' type. This method is for use within the
        fluent interface for progressively building a complete table definition.

        See the class docstring for a full example on using the fluent interface.

        Args:
            column_name: the name of the new column to add to the definition.
            value_type: a string, or a `ColumnType` value, defining
                the data type for the items in the set.

        Returns:
            a CreateTableDefinition obtained by adding (or replacing) the desired
            column to this table definition.
        """

        return CreateTableDefinition(
            columns={
                **self.columns,
                **{
                    column_name: TableValuedColumnTypeDescriptor(
                        column_type="set", value_type=value_type
                    )
                },
            },
            primary_key=self.primary_key,
        )

    def add_list_column(
        self, column_name: str, value_type: str | ColumnType
    ) -> CreateTableDefinition:
        """
        Return a new table definition object with an added column
        of 'list' type. This method is for use within the
        fluent interface for progressively building a complete table definition.

        See the class docstring for a full example on using the fluent interface.

        Args:
            column_name: the name of the new column to add to the definition.
            value_type: a string, or a `ColumnType` value, defining
                the data type for the items in the list.

        Returns:
            a CreateTableDefinition obtained by adding (or replacing) the desired
            column to this table definition.
        """

        return CreateTableDefinition(
            columns={
                **self.columns,
                **{
                    column_name: TableValuedColumnTypeDescriptor(
                        column_type="list", value_type=value_type
                    )
                },
            },
            primary_key=self.primary_key,
        )

    def add_map_column(
        self,
        column_name: str,
        key_type: str | ColumnType,
        value_type: str | ColumnType,
    ) -> CreateTableDefinition:
        """
        Return a new table definition object with an added column
        of 'map' type. This method is for use within the
        fluent interface for progressively building a complete table definition.

        See the class docstring for a full example on using the fluent interface.

        Args:
            column_name: the name of the new column to add to the definition.
            key_type: a string, or a `ColumnType` value, defining
                the data type for the keys in the map.
            value_type: a string, or a `ColumnType` value, defining
                the data type for the values in the map.

        Returns:
            a CreateTableDefinition obtained by adding (or replacing) the desired
            column to this table definition.
        """

        return CreateTableDefinition(
            columns={
                **self.columns,
                **{
                    column_name: TableKeyValuedColumnTypeDescriptor(
                        column_type="map", key_type=key_type, value_type=value_type
                    )
                },
            },
            primary_key=self.primary_key,
        )

    def add_vector_column(
        self,
        column_name: str,
        *,
        dimension: int | None = None,
        service: VectorServiceOptions | dict[str, Any] | None = None,
    ) -> CreateTableDefinition:
        """
        Return a new table definition object with an added column
        of 'vector' type. This method is for use within the
        fluent interface for progressively building a complete table definition.

        See the class docstring for a full example on using the fluent interface.

        Args:
            column_name: the name of the new column to add to the definition.
            dimension: the dimensionality of the vector, i.e. the number of components
                each vector in this column will have. If a `service` parameter is
                supplied and the vectorize model allows for it, the dimension may be
                left unspecified to have the API set a default value.
                The Data API will raise an error if a table creation is attempted with
                a vector column for which neither a service nor the dimension are given.
            service: a `VectorServiceOptions` object, or an equivalent plain dictionary,
                defining the server-side embedding service associated to the column,
                if desired.

        Returns:
            a CreateTableDefinition obtained by adding (or replacing) the desired
            column to this table definition.
        """

        return CreateTableDefinition(
            columns={
                **self.columns,
                **{
                    column_name: TableVectorColumnTypeDescriptor(
                        column_type="vector",
                        dimension=dimension,
                        service=VectorServiceOptions.coerce(service),
                    )
                },
            },
            primary_key=self.primary_key,
        )

    def add_partition_by(
        self, partition_columns: list[str] | str
    ) -> CreateTableDefinition:
        """
        Return a new table definition object with one or more added `partition_by`
        columns. This method is for use within the
        fluent interface for progressively building a complete table definition.

        See the class docstring for a full example on using the fluent interface.

        Successive calls append the requested columns at the end of the pre-existing
        `partition_by` list. In other words, these two patterns are equivalent:
        (1) X.add_partition_by(["col1", "col2"])
        (2) X.add_partition_by(["col1"]).add_partition_by("col2")

        Note that no deduplication is applied to the overall
        result: the caller should take care of not supplying the same column name
        more than once.

        Args:
            partition_columns: a list of column names (strings) to be added to the
                full table partition key. A single string (not a list) is also accepted.

        Returns:
            a CreateTableDefinition obtained by enriching the `partition_by`
            of this table definition as requested.
        """

        _partition_columns = (
            partition_columns
            if isinstance(partition_columns, list)
            else [partition_columns]
        )

        return CreateTableDefinition(
            columns=self.columns,
            primary_key=TablePrimaryKeyDescriptor(
                partition_by=self.primary_key.partition_by + _partition_columns,
                partition_sort=self.primary_key.partition_sort,
            ),
        )

    def add_partition_sort(
        self, partition_sort: dict[str, int]
    ) -> CreateTableDefinition:
        """
        Return a new table definition object with one or more added `partition_sort`
        column specifications. This method is for use within the
        fluent interface for progressively building a complete table definition.

        See the class docstring for a full example on using the fluent interface.

        Successive calls append (or replace) the requested columns at the end of
        the pre-existing `partition_sort` dictionary. In other words, these two
        patterns are equivalent:
        (1) X.add_partition_sort({"c1": 1, "c2": -1})
        (2) X.add_partition_sort({"c1": 1}).add_partition_sort({"c2": -1})

        Args:
            partition_sort: a dictionary mapping column names to their sort mode
                (ascending/descending, i.e. 1/-1. See also `astrapy.constants.SortMode`).

        Returns:
            a CreateTableDefinition obtained by enriching the `partition_sort`
            of this table definition as requested.
        """

        return CreateTableDefinition(
            columns=self.columns,
            primary_key=TablePrimaryKeyDescriptor(
                partition_by=self.primary_key.partition_by,
                partition_sort={**self.primary_key.partition_sort, **partition_sort},
            ),
        )

    def build(self) -> CreateTableDefinition:
        """
        The final step in the fluent (builder) interface. Calling this method
        finalizes the definition that has been built so far and makes it into a
        table definition ready for use in e.g. table creation.

        Note that this step may be automatically invoked by the receiving methods:
        however it is a good practice - and also adds to the readability of the code -
        to call it explicitly.

        See the class docstring for a full example on using the fluent interface.

        Returns:
            a CreateTableDefinition obtained by finalizing the definition being
                built so far.
        """

        return self
Class variables

var columns : dict[str, TableColumnTypeDescriptor]
var primary_key : TablePrimaryKeyDescriptor

Static methods

def builder() ‑> CreateTableDefinition

Create an "empty" builder for constructing a table definition through a fluent interface. The resulting object has no columns and no primary key, traits that are to be added progressively with the corresponding methods.

Since it describes a "table with no columns at all", the result of this method alone is not an acceptable table definition for running a table creation method on a Database.

See the class docstring for a full example on using the fluent interface.

Returns

a CreateTableDefinition formally describing a table with no columns.

Expand source code
@staticmethod
def builder() -> CreateTableDefinition:
    """
    Create an "empty" builder for constructing a table definition through
    a fluent interface. The resulting object has no columns and no primary key,
    traits that are to be added progressively with the corresponding methods.

    Since it describes a "table with no columns at all", the result of
    this method alone is not an acceptable table definition for running a table
    creation method on a Database.

    See the class docstring for a full example on using the fluent interface.

    Returns:
        a CreateTableDefinition formally describing a table with no columns.
    """

    # Start from a primary key with nothing in it: both the partition key
    # and the partition-sort specification are empty at this stage.
    empty_primary_key = TablePrimaryKeyDescriptor(
        partition_by=[],
        partition_sort={},
    )
    return CreateTableDefinition(columns={}, primary_key=empty_primary_key)
def coerce(raw_input: CreateTableDefinition | dict[str, Any]) ‑> CreateTableDefinition

Normalize the input, whether an object already or a plain dictionary of the right structure, into a CreateTableDefinition.

Expand source code
@classmethod
def coerce(
    cls, raw_input: CreateTableDefinition | dict[str, Any]
) -> CreateTableDefinition:
    """
    Normalize the input, whether an object already or a plain dictionary
    of the right structure, into a CreateTableDefinition.
    """

    # Pass instances through untouched; anything else is parsed as a dict.
    if isinstance(raw_input, CreateTableDefinition):
        return raw_input
    return cls._from_dict(raw_input)

Methods

def add_column(self, column_name: str, column_type: str | ColumnType) ‑> CreateTableDefinition

Return a new table definition object with an added column of a scalar type (i.e. not a list, set or other composite type). This method is for use within the fluent interface for progressively building a complete table definition.

This method is an alias for add_scalar_column.

See the class docstring for a full example on using the fluent interface.

Args

column_name
the name of the new column to add to the definition.
column_type
a string, or a ColumnType value, defining the scalar type for the column.

Returns

a CreateTableDefinition obtained by adding (or replacing) the desired column to this table definition.

Expand source code
def add_column(
    self, column_name: str, column_type: str | ColumnType
) -> CreateTableDefinition:
    """
    Return a new table definition object with an added column
    of a scalar type (i.e. not a list, set or other composite type).
    This method is for use within the fluent interface for progressively
    building a complete table definition.

    This method is an alias for `add_scalar_column`.

    See the class docstring for a full example on using the fluent interface.

    Args:
        column_name: the name of the new column to add to the definition.
        column_type: a string, or a `ColumnType` value, defining
            the scalar type for the column.

    Returns:
        a CreateTableDefinition obtained by adding (or replacing) the desired
        column to this table definition.
    """

    # Pure alias: delegate to the canonical method.
    return self.add_scalar_column(column_name, column_type)
def add_list_column(self, column_name: str, value_type: str | ColumnType) ‑> CreateTableDefinition

Return a new table definition object with an added column of 'list' type. This method is for use within the fluent interface for progressively building a complete table definition.

See the class docstring for a full example on using the fluent interface.

Args

column_name
the name of the new column to add to the definition.
value_type
a string, or a ColumnType value, defining the data type for the items in the list.

Returns

a CreateTableDefinition obtained by adding (or replacing) the desired column to this table definition.

Expand source code
def add_list_column(
    self, column_name: str, value_type: str | ColumnType
) -> CreateTableDefinition:
    """
    Return a new table definition object with an added column
    of 'list' type. This method is for use within the
    fluent interface for progressively building a complete table definition.

    See the class docstring for a full example on using the fluent interface.

    Args:
        column_name: the name of the new column to add to the definition.
        value_type: a string, or a `ColumnType` value, defining
            the data type for the items in the list.

    Returns:
        a CreateTableDefinition obtained by adding (or replacing) the desired
        column to this table definition.
    """

    # Copy the current column map and add (or overwrite) the new entry;
    # the original definition object is left unmodified.
    updated_columns = dict(self.columns)
    updated_columns[column_name] = TableValuedColumnTypeDescriptor(
        column_type="list", value_type=value_type
    )
    return CreateTableDefinition(
        columns=updated_columns,
        primary_key=self.primary_key,
    )
def add_map_column(self, column_name: str, key_type: str | ColumnType, value_type: str | ColumnType) ‑> CreateTableDefinition

Return a new table definition object with an added column of 'map' type. This method is for use within the fluent interface for progressively building a complete table definition.

See the class docstring for a full example on using the fluent interface.

Args

column_name
the name of the new column to add to the definition.
key_type
a string, or a ColumnType value, defining the data type for the keys in the map.
value_type
a string, or a ColumnType value, defining the data type for the values in the map.

Returns

a CreateTableDefinition obtained by adding (or replacing) the desired column to this table definition.

Expand source code
def add_map_column(
    self,
    column_name: str,
    key_type: str | ColumnType,
    value_type: str | ColumnType,
) -> CreateTableDefinition:
    """
    Return a new table definition object with an added column
    of 'map' type. This method is for use within the
    fluent interface for progressively building a complete table definition.

    See the class docstring for a full example on using the fluent interface.

    Args:
        column_name: the name of the new column to add to the definition.
        key_type: a string, or a `ColumnType` value, defining
            the data type for the keys in the map.
        value_type: a string, or a `ColumnType` value, defining
            the data type for the values in the map.

    Returns:
        a CreateTableDefinition obtained by adding (or replacing) the desired
        column to this table definition.
    """

    # Build a fresh column map so this definition stays immutable.
    updated_columns = dict(self.columns)
    updated_columns[column_name] = TableKeyValuedColumnTypeDescriptor(
        column_type="map", key_type=key_type, value_type=value_type
    )
    return CreateTableDefinition(
        columns=updated_columns,
        primary_key=self.primary_key,
    )
def add_partition_by(self, partition_columns: list[str] | str) ‑> CreateTableDefinition

Return a new table definition object with one or more added partition_by columns. This method is for use within the fluent interface for progressively building a complete table definition.

See the class docstring for a full example on using the fluent interface.

Successive calls append the requested columns at the end of the pre-existing partition_by list. In other words, these two patterns are equivalent: (1) X.add_partition_by(["col1", "col2"]) (2) X.add_partition_by(["col1"]).add_partition_by("col2")

Note that no deduplication is applied to the overall result: the caller should take care of not supplying the same column name more than once.

Args

partition_columns
a list of column names (strings) to be added to the full table partition key. A single string (not a list) is also accepted.

Returns

a CreateTableDefinition obtained by enriching the partition_by of this table definition as requested.

Expand source code
def add_partition_by(
    self, partition_columns: list[str] | str
) -> CreateTableDefinition:
    """
    Return a new table definition object with one or more added `partition_by`
    columns. This method is for use within the
    fluent interface for progressively building a complete table definition.

    See the class docstring for a full example on using the fluent interface.

    Successive calls append the requested columns at the end of the pre-existing
    `partition_by` list. In other words, these two patterns are equivalent:
    (1) X.add_partition_by(["col1", "col2"])
    (2) X.add_partition_by(["col1"]).add_partition_by("col2")

    Note that no deduplication is applied to the overall
    result: the caller should take care of not supplying the same column name
    more than once.

    Args:
        partition_columns: a list of column names (strings) to be added to the
            full table partition key. A single string (not a list) is also accepted.

    Returns:
        a CreateTableDefinition obtained by enriching the `partition_by`
        of this table definition as requested.
    """

    # A lone string is treated as a one-element list of column names.
    if isinstance(partition_columns, list):
        columns_to_append = partition_columns
    else:
        columns_to_append = [partition_columns]

    enriched_primary_key = TablePrimaryKeyDescriptor(
        partition_by=[*self.primary_key.partition_by, *columns_to_append],
        partition_sort=self.primary_key.partition_sort,
    )
    return CreateTableDefinition(
        columns=self.columns,
        primary_key=enriched_primary_key,
    )
def add_partition_sort(self, partition_sort: dict[str, int]) ‑> CreateTableDefinition

Return a new table definition object with one or more added partition_sort column specifications. This method is for use within the fluent interface for progressively building a complete table definition.

See the class docstring for a full example on using the fluent interface.

Successive calls append (or replace) the requested columns at the end of the pre-existing partition_sort dictionary. In other words, these two patterns are equivalent: (1) X.add_partition_sort({"c1": 1, "c2": -1}) (2) X.add_partition_sort({"c1": 1}).add_partition_sort({"c2": -1})

Args

partition_sort
a dictionary mapping column names to their sort mode
(ascending/descending, i.e. 1/-1. See also SortMode).

Returns

a CreateTableDefinition obtained by enriching the partition_sort of this table definition as requested.

Expand source code
def add_partition_sort(
    self, partition_sort: dict[str, int]
) -> CreateTableDefinition:
    """
    Return a new table definition object with one or more added `partition_sort`
    column specifications. This method is for use within the
    fluent interface for progressively building a complete table definition.

    See the class docstring for a full example on using the fluent interface.

    Successive calls append (or replace) the requested columns at the end of
    the pre-existing `partition_sort` dictionary. In other words, these two
    patterns are equivalent:
    (1) X.add_partition_sort({"c1": 1, "c2": -1})
    (2) X.add_partition_sort({"c1": 1}).add_partition_sort({"c2": -1})

    Args:
        partition_sort: a dictionary mapping column names to their sort mode
            (ascending/descending, i.e. 1/-1. See also `astrapy.constants.SortMode`).

    Returns:
        a CreateTableDefinition obtained by enriching the `partition_sort`
        of this table definition as requested.
    """

    # Merge the new sort specs over the existing ones: duplicate column
    # names take the newly-supplied sort mode.
    merged_sort = dict(self.primary_key.partition_sort)
    merged_sort.update(partition_sort)
    enriched_primary_key = TablePrimaryKeyDescriptor(
        partition_by=self.primary_key.partition_by,
        partition_sort=merged_sort,
    )
    return CreateTableDefinition(
        columns=self.columns,
        primary_key=enriched_primary_key,
    )
def add_scalar_column(self, column_name: str, column_type: str | ColumnType) ‑> CreateTableDefinition

Return a new table definition object with an added column of a scalar type (i.e. not a list, set or other composite type). This method is for use within the fluent interface for progressively building a complete table definition.

See the class docstring for a full example on using the fluent interface.

Args

column_name
the name of the new column to add to the definition.
column_type
a string, or a ColumnType value, defining the scalar type for the column.

Returns

a CreateTableDefinition obtained by adding (or replacing) the desired column to this table definition.

Expand source code
def add_scalar_column(
    self, column_name: str, column_type: str | ColumnType
) -> CreateTableDefinition:
    """
    Return a new table definition object enriched with one column of a
    scalar type (that is, not a list, set or other composite type).
    This method is part of the fluent interface for progressively
    building a complete table definition.

    See the class docstring for a full example on using the fluent interface.

    Args:
        column_name: the name of the new column to add to the definition.
        column_type: a string, or a `ColumnType` value, defining
            the scalar type for the column.

    Returns:
        a CreateTableDefinition obtained by adding (or replacing) the desired
        column to this table definition.
    """

    # Build the descriptor for the new column, then return a copy of this
    # definition with the column added (or overwritten if already present).
    new_descriptor = TableScalarColumnTypeDescriptor(
        column_type=ColumnType.coerce(column_type)
    )
    updated_columns = dict(self.columns)
    updated_columns[column_name] = new_descriptor
    return CreateTableDefinition(
        columns=updated_columns,
        primary_key=self.primary_key,
    )
def add_set_column(self, column_name: str, value_type: str | ColumnType) ‑> CreateTableDefinition

Return a new table definition object with an added column of 'set' type. This method is for use within the fluent interface for progressively building a complete table definition.

See the class docstring for a full example on using the fluent interface.

Args

column_name
the name of the new column to add to the definition.
value_type
a string, or a ColumnType value, defining the data type for the items in the set.

Returns

a CreateTableDefinition obtained by adding (or replacing) the desired column to this table definition.

Expand source code
def add_set_column(
    self, column_name: str, value_type: str | ColumnType
) -> CreateTableDefinition:
    """
    Return a new table definition object enriched with one column of
    'set' type. This method is part of the fluent interface for
    progressively building a complete table definition.

    See the class docstring for a full example on using the fluent interface.

    Args:
        column_name: the name of the new column to add to the definition.
        value_type: a string, or a `ColumnType` value, defining
            the data type for the items in the set.

    Returns:
        a CreateTableDefinition obtained by adding (or replacing) the desired
        column to this table definition.
    """

    # A new (immutable-style) definition is returned; a pre-existing column
    # with the same name is replaced by the new set-typed descriptor.
    set_descriptor = TableValuedColumnTypeDescriptor(
        column_type="set", value_type=value_type
    )
    return CreateTableDefinition(
        columns={**self.columns, column_name: set_descriptor},
        primary_key=self.primary_key,
    )
def add_vector_column(self, column_name: str, *, dimension: int | None = None, service: VectorServiceOptions | dict[str, Any] | None = None) ‑> CreateTableDefinition

Return a new table definition object with an added column of 'vector' type. This method is for use within the fluent interface for progressively building a complete table definition.

See the class docstring for a full example on using the fluent interface.

Args

column_name
the name of the new column to add to the definition.
dimension
the dimensionality of the vector, i.e. the number of components each vector in this column will have. If a service parameter is supplied and the vectorize model allows for it, the dimension may be left unspecified to have the API set a default value. The Data API will raise an error if a table creation is attempted with a vector column for which neither a service nor the dimension are given.
service
a VectorServiceOptions object, or an equivalent plain dictionary, defining the server-side embedding service associated to the column, if desired.

Returns

a CreateTableDefinition obtained by adding (or replacing) the desired column to this table definition.

Expand source code
def add_vector_column(
    self,
    column_name: str,
    *,
    dimension: int | None = None,
    service: VectorServiceOptions | dict[str, Any] | None = None,
) -> CreateTableDefinition:
    """
    Return a new table definition object enriched with one column of
    'vector' type. This method is part of the fluent interface for
    progressively building a complete table definition.

    See the class docstring for a full example on using the fluent interface.

    Args:
        column_name: the name of the new column to add to the definition.
        dimension: the dimensionality of the vector, i.e. the number of components
            each vector in this column will have. If a `service` parameter is
            supplied and the vectorize model allows for it, the dimension may be
            left unspecified to have the API set a default value.
            The Data API will raise an error if a table creation is attempted with
            a vector column for which neither a service nor the dimension are given.
        service: a `VectorServiceOptions` object, or an equivalent plain dictionary,
            defining the server-side embedding service associated to the column,
            if desired.

    Returns:
        a CreateTableDefinition obtained by adding (or replacing) the desired
        column to this table definition.
    """

    # The service parameter is normalized (dict -> VectorServiceOptions)
    # before being stored in the column descriptor.
    vector_descriptor = TableVectorColumnTypeDescriptor(
        column_type="vector",
        dimension=dimension,
        service=VectorServiceOptions.coerce(service),
    )
    updated_columns = dict(self.columns)
    updated_columns[column_name] = vector_descriptor
    return CreateTableDefinition(
        columns=updated_columns,
        primary_key=self.primary_key,
    )
def as_dict(self) ‑> dict[str, typing.Any]

Recast this object into a dictionary.

Expand source code
def as_dict(self) -> dict[str, Any]:
    """Recast this object into a dictionary."""

    # Serialize columns and primary key, then drop None-valued entries
    # (mirroring the serialization convention of the sibling classes).
    serialized = {
        "columns": {
            name: descriptor.as_dict() for name, descriptor in self.columns.items()
        },
        "primaryKey": self.primary_key.as_dict(),
    }
    return {key: value for key, value in serialized.items() if value is not None}
def build(self) ‑> CreateTableDefinition

The final step in the fluent (builder) interface. Calling this method finalizes the definition that has been built so far and makes it into a table definition ready for use in e.g. table creation.

Note that this step may be automatically invoked by the receiving methods: however it is a good practice - and also adds to the readability of the code - to call it explicitly.

See the class docstring for a full example on using the fluent interface.

Returns

a CreateTableDefinition obtained by finalizing the definition being built so far.

Expand source code
def build(self) -> CreateTableDefinition:
    """
    The final step in the fluent (builder) interface. Calling this method
    finalizes the definition that has been built so far and makes it into a
    table definition ready for use in e.g. table creation.

    Note that this step may be automatically invoked by the receiving methods:
    however it is a good practice - and also adds to the readability of the code -
    to call it explicitly.

    See the class docstring for a full example on using the fluent interface.

    Returns:
        a CreateTableDefinition obtained by finalizing the definition being
            built so far.
    """

    # Each fluent step already returns a complete definition object, so
    # finalizing amounts to handing back the object itself.
    return self
class EmbeddingProvider (display_name: str | None, models: list[EmbeddingProviderModel], parameters: list[EmbeddingProviderParameter], supported_authentication: dict[str, EmbeddingProviderAuthentication], url: str | None)

A representation of an embedding provider, as returned by the 'findEmbeddingProviders' Data API endpoint.

Attributes

display_name
a version of the provider name for display and pretty printing. Not to be used when issuing vectorize API requests (for the latter, it is the key in the providers dictionary that is required).
models
a list of EmbeddingProviderModel objects pertaining to the provider.
parameters
a list of EmbeddingProviderParameter objects common to all models for this provider.
supported_authentication
a dictionary of the authentication modes for this provider. Note that disabled modes may still appear in this map, albeit with the enabled property set to False.
url
a string template for the URL used by the Data API when issuing the request toward the embedding provider. This is of no direct concern to the Data API user.
Expand source code
@dataclass
class EmbeddingProvider:
    """
    A representation of an embedding provider, as returned by the
    'findEmbeddingProviders' Data API endpoint.

    Attributes:
        display_name: a version of the provider name for display and pretty printing.
            Not to be used when issuing vectorize API requests (for the latter, it is
            the key in the providers dictionary that is required).
        models: a list of `EmbeddingProviderModel` objects pertaining to the provider.
        parameters: a list of `EmbeddingProviderParameter` objects common to all models
            for this provider.
        supported_authentication: a dictionary of the authentication modes for
            this provider. Note that disabled modes may still appear in this map,
            albeit with the `enabled` property set to False.
        url: a string template for the URL used by the Data API when issuing the request
            toward the embedding provider. This is of no direct concern to the Data API user.
    """

    display_name: str | None
    models: list[EmbeddingProviderModel]
    parameters: list[EmbeddingProviderParameter]
    supported_authentication: dict[str, EmbeddingProviderAuthentication]
    url: str | None

    def __repr__(self) -> str:
        return f"EmbeddingProvider(display_name='{self.display_name}', models={self.models})"

    def as_dict(self) -> dict[str, Any]:
        """Recast this object into a dictionary."""

        return {
            "displayName": self.display_name,
            "models": [mdl.as_dict() for mdl in self.models],
            "parameters": [prm.as_dict() for prm in self.parameters],
            "supportedAuthentication": {
                auth_name: auth.as_dict()
                for auth_name, auth in self.supported_authentication.items()
            },
            "url": self.url,
        }

    @staticmethod
    def _from_dict(raw_dict: dict[str, Any]) -> EmbeddingProvider:
        """
        Create an instance of EmbeddingProvider from a dictionary
        such as one from the Data API.
        """

        # Alert (but do not fail) on keys this client version does not know.
        expected_keys = {
            "displayName",
            "models",
            "parameters",
            "supportedAuthentication",
            "url",
        }
        unexpected = raw_dict.keys() - expected_keys
        if unexpected:
            warnings.warn(
                "Unexpected key(s) encountered parsing a dictionary into "
                f"an `EmbeddingProvider`: '{','.join(sorted(unexpected))}'"
            )
        return EmbeddingProvider(
            display_name=raw_dict["displayName"],
            models=[
                EmbeddingProviderModel._from_dict(model_body)
                for model_body in raw_dict["models"]
            ],
            parameters=[
                EmbeddingProviderParameter._from_dict(param_body)
                for param_body in raw_dict["parameters"]
            ],
            supported_authentication={
                auth_name: EmbeddingProviderAuthentication._from_dict(auth_body)
                for auth_name, auth_body in raw_dict["supportedAuthentication"].items()
            },
            url=raw_dict["url"],
        )

Class variables

var display_name : str | None
var models : list[EmbeddingProviderModel]
var parameters : list[EmbeddingProviderParameter]
var supported_authentication : dict[str, EmbeddingProviderAuthentication]
var url : str | None

Methods

def as_dict(self) ‑> dict[str, typing.Any]

Recast this object into a dictionary.

Expand source code
def as_dict(self) -> dict[str, Any]:
    """Recast this object into a dictionary."""

    # Sub-objects are serialized through their own as_dict methods.
    auth_map = {
        auth_name: auth.as_dict()
        for auth_name, auth in self.supported_authentication.items()
    }
    return {
        "displayName": self.display_name,
        "models": [mdl.as_dict() for mdl in self.models],
        "parameters": [prm.as_dict() for prm in self.parameters],
        "supportedAuthentication": auth_map,
        "url": self.url,
    }
class EmbeddingProviderAuthentication (enabled: bool, tokens: list[EmbeddingProviderToken])

A representation of an authentication mode for using an embedding model, modeling the corresponding part of the response returned by the 'findEmbeddingProviders' Data API endpoint (namely "supportedAuthentication").

Attributes

enabled
whether this authentication mode is available for a given model.
tokens
a list of EmbeddingProviderToken objects, detailing the secrets required for the authentication mode.
Expand source code
@dataclass
class EmbeddingProviderAuthentication:
    """
    A representation of an authentication mode for using an embedding model,
    modeling the corresponding part of the response returned by the
    'findEmbeddingProviders' Data API endpoint (namely "supportedAuthentication").

    Attributes:
        enabled: whether this authentication mode is available for a given model.
        tokens: a list of `EmbeddingProviderToken` objects,
            detailing the secrets required for the authentication mode.
    """

    enabled: bool
    tokens: list[EmbeddingProviderToken]

    def __repr__(self) -> str:
        token_blurb = ",".join(str(tkn) for tkn in self.tokens)
        return (
            f"EmbeddingProviderAuthentication(enabled={self.enabled}, "
            f"tokens={token_blurb})"
        )

    def as_dict(self) -> dict[str, Any]:
        """Recast this object into a dictionary."""

        return {
            "enabled": self.enabled,
            "tokens": [tkn.as_dict() for tkn in self.tokens],
        }

    @staticmethod
    def _from_dict(raw_dict: dict[str, Any]) -> EmbeddingProviderAuthentication:
        """
        Create an instance of EmbeddingProviderAuthentication from a dictionary
        such as one from the Data API.
        """

        # Alert (but do not fail) on keys this client version does not know.
        unexpected = raw_dict.keys() - {"enabled", "tokens"}
        if unexpected:
            warnings.warn(
                "Unexpected key(s) encountered parsing a dictionary into "
                f"an `EmbeddingProviderAuthentication`: '{','.join(sorted(unexpected))}'"
            )
        return EmbeddingProviderAuthentication(
            enabled=raw_dict["enabled"],
            tokens=[
                EmbeddingProviderToken._from_dict(token_body)
                for token_body in raw_dict["tokens"]
            ],
        )

Class variables

var enabled : bool
var tokens : list[EmbeddingProviderToken]

Methods

def as_dict(self) ‑> dict[str, typing.Any]

Recast this object into a dictionary.

Expand source code
def as_dict(self) -> dict[str, Any]:
    """Recast this object into a dictionary."""

    # Each token serializes itself; the enabled flag passes through as-is.
    token_dicts = [tkn.as_dict() for tkn in self.tokens]
    return {"enabled": self.enabled, "tokens": token_dicts}
class EmbeddingProviderModel (name: str, parameters: list[EmbeddingProviderParameter], vector_dimension: int | None)

A representation of an embedding model as returned by the 'findEmbeddingProviders' Data API endpoint.

Attributes

name
the model name as must be passed when issuing vectorize operations to the API.
parameters
a list of the EmbeddingProviderParameter objects the model admits.
vector_dimension
an integer for the dimensionality of the embedding model. If this is None, the dimension can assume multiple values as specified by a corresponding parameter listed with the model.
Expand source code
@dataclass
class EmbeddingProviderModel:
    """
    A representation of an embedding model as returned by the
    'findEmbeddingProviders' Data API endpoint.

    Attributes:
        name: the model name as must be passed when issuing
            vectorize operations to the API.
        parameters: a list of the `EmbeddingProviderParameter` objects the model admits.
        vector_dimension: an integer for the dimensionality of the embedding model.
            If this is None, the dimension can assume multiple values as specified
            by a corresponding parameter listed with the model.
    """

    name: str
    parameters: list[EmbeddingProviderParameter]
    vector_dimension: int | None

    def __repr__(self) -> str:
        return f"EmbeddingProviderModel(name='{self.name}')"

    def as_dict(self) -> dict[str, Any]:
        """Recast this object into a dictionary."""

        return {
            "name": self.name,
            "parameters": [prm.as_dict() for prm in self.parameters],
            "vectorDimension": self.vector_dimension,
        }

    @staticmethod
    def _from_dict(raw_dict: dict[str, Any]) -> EmbeddingProviderModel:
        """
        Create an instance of EmbeddingProviderModel from a dictionary
        such as one from the Data API.
        """

        # Alert (but do not fail) on keys this client version does not know.
        unexpected = raw_dict.keys() - {"name", "parameters", "vectorDimension"}
        if unexpected:
            warnings.warn(
                "Unexpected key(s) encountered parsing a dictionary into "
                f"an `EmbeddingProviderModel`: '{','.join(sorted(unexpected))}'"
            )
        return EmbeddingProviderModel(
            name=raw_dict["name"],
            parameters=[
                EmbeddingProviderParameter._from_dict(param_body)
                for param_body in raw_dict["parameters"]
            ],
            vector_dimension=raw_dict["vectorDimension"],
        )

Class variables

var name : str
var parameters : list[EmbeddingProviderParameter]
var vector_dimension : int | None

Methods

def as_dict(self) ‑> dict[str, typing.Any]

Recast this object into a dictionary.

Expand source code
def as_dict(self) -> dict[str, Any]:
    """Recast this object into a dictionary."""

    param_dicts = [prm.as_dict() for prm in self.parameters]
    return {
        "name": self.name,
        "parameters": param_dicts,
        "vectorDimension": self.vector_dimension,
    }
class EmbeddingProviderParameter (default_value: Any, display_name: str | None, help: str | None, hint: str | None, name: str, required: bool, parameter_type: str, validation: dict[str, Any])

A representation of a parameter as returned by the 'findEmbeddingProviders' Data API endpoint.

Attributes

default_value
the default value for the parameter.
display_name
a version of the parameter name for display purposes (may be None).
help
a textual description of the parameter.
hint
a hint string for the parameter, if provided by the API (None otherwise).
name
the name to use when passing the parameter for vectorize operations.
required
whether the parameter is required or not.
parameter_type
a textual description of the data type for the parameter.
validation
a dictionary describing a parameter-specific validation policy.
Expand source code
@dataclass
class EmbeddingProviderParameter:
    """
    A representation of a parameter as returned by the 'findEmbeddingProviders'
    Data API endpoint.

    Attributes:
        default_value: the default value for the parameter.
        display_name: a version of the parameter name for display purposes
            (may be absent in the API response, in which case it is None).
        help: a textual description of the parameter.
        hint: a hint string for the parameter, if provided by the API
            (None otherwise).
        name: the name to use when passing the parameter for vectorize operations.
        required: whether the parameter is required or not.
        parameter_type: a textual description of the data type for the parameter.
        validation: a dictionary describing a parameter-specific validation policy.
    """

    default_value: Any
    display_name: str | None
    help: str | None
    hint: str | None
    name: str
    required: bool
    parameter_type: str
    validation: dict[str, Any]

    def __repr__(self) -> str:
        return f"EmbeddingProviderParameter(name='{self.name}')"

    def as_dict(self) -> dict[str, Any]:
        """Recast this object into a dictionary (None-valued entries are omitted)."""

        return {
            k: v
            for k, v in {
                "defaultValue": self.default_value,
                "displayName": self.display_name,
                "help": self.help,
                "hint": self.hint,
                "name": self.name,
                "required": self.required,
                "type": self.parameter_type,
                "validation": self.validation,
            }.items()
            if v is not None
        }

    @staticmethod
    def _from_dict(raw_dict: dict[str, Any]) -> EmbeddingProviderParameter:
        """
        Create an instance of EmbeddingProviderParameter from a dictionary
        such as one from the Data API.
        """

        # Alert (but do not fail) on keys this client version does not know.
        residual_keys = raw_dict.keys() - {
            "defaultValue",
            "displayName",
            "help",
            "hint",
            "name",
            "required",
            "type",
            "validation",
        }
        if residual_keys:
            warnings.warn(
                "Unexpected key(s) encountered parsing a dictionary into "
                f"an `EmbeddingProviderParameter`: '{','.join(sorted(residual_keys))}'"
            )
        # Optional keys are read with .get (-> None when absent); the
        # remaining keys are mandatory in the API response.
        return EmbeddingProviderParameter(
            default_value=raw_dict.get("defaultValue"),
            display_name=raw_dict.get("displayName"),
            help=raw_dict.get("help"),
            hint=raw_dict.get("hint"),
            name=raw_dict["name"],
            required=raw_dict["required"],
            parameter_type=raw_dict["type"],
            validation=raw_dict["validation"],
        )

Class variables

var default_value : Any
var display_name : str | None
var help : str | None
var hint : str | None
var name : str
var parameter_type : str
var required : bool
var validation : dict[str, typing.Any]

Methods

def as_dict(self) ‑> dict[str, typing.Any]

Recast this object into a dictionary.

Expand source code
def as_dict(self) -> dict[str, Any]:
    """Recast this object into a dictionary (None-valued entries are omitted)."""

    candidate_entries = {
        "defaultValue": self.default_value,
        "displayName": self.display_name,
        "help": self.help,
        "hint": self.hint,
        "name": self.name,
        "required": self.required,
        "type": self.parameter_type,
        "validation": self.validation,
    }
    # Only non-None entries make it into the serialized form.
    return {key: value for key, value in candidate_entries.items() if value is not None}
class EmbeddingProviderToken (accepted: str, forwarded: str)

A representation of a "token", that is a specific secret string, needed by an embedding model; this models a part of the response from the 'findEmbeddingProviders' Data API endpoint.

Attributes

accepted
the name of this "token" as seen by the Data API. This is the name that should be used in the clients when supplying the secret, whether as header or by shared-secret.
forwarded
the name used by the API when issuing the embedding request to the embedding provider. This is of no direct interest for the Data API user.
Expand source code
@dataclass
class EmbeddingProviderToken:
    """
    A representation of a "token", that is a specific secret string, needed by
    an embedding model; this models a part of the response from the
    'findEmbeddingProviders' Data API endpoint.

    Attributes:
        accepted: the name of this "token" as seen by the Data API. This is the
            name that should be used in the clients when supplying the secret,
            whether as header or by shared-secret.
        forwarded: the name used by the API when issuing the embedding request
            to the embedding provider. This is of no direct interest for the Data API user.
    """

    accepted: str
    forwarded: str

    def __repr__(self) -> str:
        return f"EmbeddingProviderToken('{self.accepted}')"

    def as_dict(self) -> dict[str, Any]:
        """Recast this object into a dictionary."""

        return {"accepted": self.accepted, "forwarded": self.forwarded}

    @staticmethod
    def _from_dict(raw_dict: dict[str, Any]) -> EmbeddingProviderToken:
        """
        Create an instance of EmbeddingProviderToken from a dictionary
        such as one from the Data API.
        """

        # Alert (but do not fail) on keys this client version does not know.
        unexpected = raw_dict.keys() - {"accepted", "forwarded"}
        if unexpected:
            warnings.warn(
                "Unexpected key(s) encountered parsing a dictionary into "
                f"an `EmbeddingProviderToken`: '{','.join(sorted(unexpected))}'"
            )
        return EmbeddingProviderToken(
            accepted=raw_dict["accepted"],
            forwarded=raw_dict["forwarded"],
        )

Class variables

var accepted : str
var forwarded : str

Methods

def as_dict(self) ‑> dict[str, typing.Any]

Recast this object into a dictionary.

Expand source code
def as_dict(self) -> dict[str, Any]:
    """Recast this object into a dictionary."""

    return dict(accepted=self.accepted, forwarded=self.forwarded)
class FindEmbeddingProvidersResult (embedding_providers: dict[str, EmbeddingProvider], raw_info: dict[str, Any] | None)

A representation of the whole response from the 'findEmbeddingProviders' Data API endpoint.

Attributes

embedding_providers
a dictionary of provider names to EmbeddingProvider objects.
raw_info
a (nested) dictionary containing the original full response from the endpoint.
Expand source code
@dataclass
class FindEmbeddingProvidersResult:
    """
    A representation of the whole response from the 'findEmbeddingProviders'
    Data API endpoint.

    Attributes:
        embedding_providers: a dictionary of provider names to EmbeddingProvider objects.
        raw_info: a (nested) dictionary containing the original full response from the endpoint.
    """

    embedding_providers: dict[str, EmbeddingProvider]
    raw_info: dict[str, Any] | None

    def __repr__(self) -> str:
        provider_names = ", ".join(sorted(self.embedding_providers.keys()))
        return (
            "FindEmbeddingProvidersResult(embedding_providers="
            f"{provider_names})"
        )

    def as_dict(self) -> dict[str, Any]:
        """Recast this object into a dictionary (raw_info is not serialized)."""

        provider_map = {
            provider_name: provider.as_dict()
            for provider_name, provider in self.embedding_providers.items()
        }
        return {"embeddingProviders": provider_map}

    @staticmethod
    def _from_dict(raw_dict: dict[str, Any]) -> FindEmbeddingProvidersResult:
        """
        Create an instance of FindEmbeddingProvidersResult from a dictionary
        such as one from the Data API.
        """

        # Alert (but do not fail) on keys this client version does not know.
        unexpected = raw_dict.keys() - {"embeddingProviders"}
        if unexpected:
            warnings.warn(
                "Unexpected key(s) encountered parsing a dictionary into "
                f"a `FindEmbeddingProvidersResult`: '{','.join(sorted(unexpected))}'"
            )
        # The full raw payload is retained alongside the parsed providers.
        return FindEmbeddingProvidersResult(
            raw_info=raw_dict,
            embedding_providers={
                provider_name: EmbeddingProvider._from_dict(provider_body)
                for provider_name, provider_body in raw_dict[
                    "embeddingProviders"
                ].items()
            },
        )

Class variables

var embedding_providers : dict[str, EmbeddingProvider]
var raw_info : dict[str, typing.Any] | None

Methods

def as_dict(self) ‑> dict[str, typing.Any]

Recast this object into a dictionary.

Expand source code
def as_dict(self) -> dict[str, Any]:
    """Recast this object into a dictionary (raw_info is not serialized)."""

    provider_map = {
        provider_name: provider.as_dict()
        for provider_name, provider in self.embedding_providers.items()
    }
    return {"embeddingProviders": provider_map}
class ListTableDefinition (columns: dict[str, TableColumnTypeDescriptor], primary_key: TablePrimaryKeyDescriptor)

A structure expressing the definition ("schema") of a table the way the Data API describes it. This is the returned object when querying the Data API about table metadata.

This class differs from CreateTableDefinition, used when creating tables: this one can also describe tables with unsupported features, which could not be created through the Data API.

Attributes

columns
a map from column names to their type definition object.
primary_key
a specification of the primary key for the table.
Expand source code
@dataclass
class ListTableDefinition:
    """
    A structure expressing the definition ("schema") of a table the way the Data API
    describes it. This is the returned object when querying the Data API about
    table metadata.

    This class differs from `CreateTableDefinition`, used when creating tables:
    this one can also describe tables with unsupported features, which could not
    be created through the Data API.

    Attributes:
        columns: a map from column names to their type definition object.
        primary_key: a specification of the primary key for the table.
    """

    columns: dict[str, TableColumnTypeDescriptor]
    primary_key: TablePrimaryKeyDescriptor

    def __repr__(self) -> str:
        pieces = [
            f"columns=[{','.join(self.columns.keys())}]",
            f"primary_key={self.primary_key}",
        ]
        return f"{self.__class__.__name__}({', '.join(pieces)})"

    def as_dict(self) -> dict[str, Any]:
        """Recast this object into a dictionary."""

        # Serialize sub-objects, then drop None-valued entries (per the
        # serialization convention used across these classes).
        serialized = {
            "columns": {
                col_name: col.as_dict() for col_name, col in self.columns.items()
            },
            "primaryKey": self.primary_key.as_dict(),
        }
        return {key: value for key, value in serialized.items() if value is not None}

    @classmethod
    def _from_dict(cls, raw_dict: dict[str, Any]) -> ListTableDefinition:
        """
        Create an instance of ListTableDefinition from a dictionary
        such as one from the Data API.
        """

        _warn_residual_keys(cls, raw_dict, {"columns", "primaryKey"})
        return ListTableDefinition(
            columns={
                col_name: TableColumnTypeDescriptor.coerce(col_body)
                for col_name, col_body in raw_dict["columns"].items()
            },
            primary_key=TablePrimaryKeyDescriptor.coerce(raw_dict["primaryKey"]),
        )

    @classmethod
    def coerce(
        cls, raw_input: ListTableDefinition | dict[str, Any]
    ) -> ListTableDefinition:
        """
        Normalize the input, whether an object already or a plain dictionary
        of the right structure, into a ListTableDefinition.
        """

        # Ready-made instances pass through untouched; anything else is
        # treated as a raw dictionary and parsed.
        if not isinstance(raw_input, ListTableDefinition):
            return cls._from_dict(raw_input)
        return raw_input

Class variables

var columns : dict[str, TableColumnTypeDescriptor]
var primary_key : TablePrimaryKeyDescriptor

Static methods

def coerce(raw_input: ListTableDefinition | dict[str, Any]) ‑> ListTableDefinition

Normalize the input, whether an object already or a plain dictionary of the right structure, into a ListTableDefinition.

Expand source code
@classmethod
def coerce(
    cls, raw_input: ListTableDefinition | dict[str, Any]
) -> ListTableDefinition:
    """
    Normalize the input, whether an object already or a plain dictionary
    of the right structure, into a ListTableDefinition.
    """

    # Ready-made instances pass through untouched; anything else is
    # treated as a raw dictionary and parsed into a new object.
    if not isinstance(raw_input, ListTableDefinition):
        return cls._from_dict(raw_input)
    return raw_input

Methods

def as_dict(self) ‑> dict[str, typing.Any]

Recast this object into a dictionary.

Expand source code
def as_dict(self) -> dict[str, Any]:
    """Recast this object into a dictionary."""

    serialized = {
        "columns": {
            col_name: col.as_dict() for col_name, col in self.columns.items()
        },
        "primaryKey": self.primary_key.as_dict(),
    }
    # Drop None-valued entries, per the shared serialization convention.
    return {key: value for key, value in serialized.items() if value is not None}
class ListTableDescriptor (name: str, definition: ListTableDefinition, raw_descriptor: dict[str, Any] | None)

A structure expressing full description of a table as the Data API returns it, i.e. its name and its definition sub-structure.

Attributes

name
the name of the table.
definition
a ListTableDefinition instance.
raw_descriptor
the raw response from the Data API.
Expand source code
@dataclass
class ListTableDescriptor:
    """
    A structure expressing full description of a table as the Data API
    returns it, i.e. its name and its `definition` sub-structure.

    Attributes:
        name: the name of the table.
        definition: a ListTableDefinition instance.
        raw_descriptor: the raw response from the Data API.
    """

    name: str
    definition: ListTableDefinition
    raw_descriptor: dict[str, Any] | None

    def __repr__(self) -> str:
        # the raw descriptor is elided from the repr (shown as "..." if present)
        pieces = [
            f"name={self.name!r}",
            f"definition={self.definition!r}",
            None if self.raw_descriptor is None else "raw_descriptor=...",
        ]
        inner = ", ".join(pc for pc in pieces if pc is not None)
        return f"{self.__class__.__name__}({inner})"

    def as_dict(self) -> dict[str, Any]:
        """
        Recast this object into a dictionary.
        Empty `definition` will not be returned at all.
        """

        full_dict = {
            "name": self.name,
            "definition": self.definition.as_dict(),
        }
        # falsy values (such as an empty definition dict) are dropped entirely
        return {k: v for k, v in full_dict.items() if v}

    @classmethod
    def _from_dict(cls, raw_dict: dict[str, Any]) -> ListTableDescriptor:
        """
        Create an instance of ListTableDescriptor from a dictionary
        such as one from the Data API.
        """

        _warn_residual_keys(cls, raw_dict, {"name", "definition"})
        # a missing or None "definition" is treated as an empty definition
        return ListTableDescriptor(
            name=raw_dict["name"],
            definition=ListTableDefinition.coerce(raw_dict.get("definition") or {}),
            raw_descriptor=raw_dict,
        )

    @classmethod
    def coerce(
        cls, raw_input: ListTableDescriptor | dict[str, Any]
    ) -> ListTableDescriptor:
        """
        Normalize the input, whether an object already or a plain dictionary
        of the right structure, into a ListTableDescriptor.
        """

        if not isinstance(raw_input, ListTableDescriptor):
            return cls._from_dict(raw_input)
        return raw_input

Class variables

var definition : ListTableDefinition
var name : str
var raw_descriptor : dict[str, typing.Any] | None

Static methods

def coerce(raw_input: ListTableDescriptor | dict[str, Any]) ‑> ListTableDescriptor

Normalize the input, whether an object already or a plain dictionary of the right structure, into a ListTableDescriptor.

Expand source code
@classmethod
def coerce(
    cls, raw_input: ListTableDescriptor | dict[str, Any]
) -> ListTableDescriptor:
    """
    Normalize the input, whether an object already or a plain dictionary
    of the right structure, into a ListTableDescriptor.
    """

    if isinstance(raw_input, ListTableDescriptor):
        return raw_input
    else:
        return cls._from_dict(raw_input)

Methods

def as_dict(self) ‑> dict[str, typing.Any]

Recast this object into a dictionary. Empty definition will not be returned at all.

Expand source code
def as_dict(self) -> dict[str, Any]:
    """
    Recast this object into a dictionary.
    Empty `definition` will not be returned at all.
    """

    return {
        k: v
        for k, v in {
            "name": self.name,
            "definition": self.definition.as_dict(),
        }.items()
        if v
    }
class TableAPIIndexSupportDescriptor (cql_definition: str, create_index: bool, filter: bool)

Represents the additional information returned by the Data API when describing an index that has 'unsupported' status. Unsupported indexes may have been created by means other than the Data API (e.g. CQL direct interaction with the database).

The Data API reports these indexes along with the others when listing the indexes, and provides the information marshaled in this object to detail which level of support the index has (for instance, it can be a partial support where the index can still be used to filter reads).

Attributes

cql_definition
a free-form string containing the CQL definition for the index.
create_index
whether such an index can be created through the Data API.
filter
whether the index can be involved in a Data API filter clause.
Expand source code
@dataclass
class TableAPIIndexSupportDescriptor:
    """
    Represents the additional information returned by the Data API when describing
    an index that has 'unsupported' status. Unsupported indexes may have been created
    by means other than the Data API (e.g. CQL direct interaction with the database).

    The Data API reports these indexes along with the others when listing the indexes,
    and provides the information marshaled in this object to detail which level
    of support the index has (for instance, it can be a partial support where the
    index can still be used to filter reads).

    Attributes:
        cql_definition: a free-form string containing the CQL definition for the index.
        create_index: whether such an index can be created through the Data API.
        filter: whether the index can be involved in a Data API filter clause.
    """

    cql_definition: str
    create_index: bool
    filter: bool

    def __repr__(self) -> str:
        pieces = [
            f'"{self.cql_definition}"',
            f"create_index={self.create_index}",
            f"filter={self.filter}",
        ]
        return f"{self.__class__.__name__}({', '.join(pieces)})"

    def as_dict(self) -> dict[str, Any]:
        """Recast this object into a dictionary."""

        return dict(
            cqlDefinition=self.cql_definition,
            createIndex=self.create_index,
            filter=self.filter,
        )

    @classmethod
    def _from_dict(cls, raw_dict: dict[str, Any]) -> TableAPIIndexSupportDescriptor:
        """
        Create an instance of TableAPIIndexSupportDescriptor from a dictionary
        such as one from the Data API.
        """

        expected_keys = {"cqlDefinition", "createIndex", "filter"}
        _warn_residual_keys(cls, raw_dict, expected_keys)
        return TableAPIIndexSupportDescriptor(
            cql_definition=raw_dict["cqlDefinition"],
            create_index=raw_dict["createIndex"],
            filter=raw_dict["filter"],
        )

Class variables

var cql_definition : str
var create_index : bool
var filter : bool

Methods

def as_dict(self) ‑> dict[str, typing.Any]

Recast this object into a dictionary.

Expand source code
def as_dict(self) -> dict[str, Any]:
    """Recast this object into a dictionary."""

    return {
        "cqlDefinition": self.cql_definition,
        "createIndex": self.create_index,
        "filter": self.filter,
    }
class TableAPISupportDescriptor (cql_definition: str, create_table: bool, insert: bool, read: bool)

Represents the additional information returned by the Data API when describing a table with unsupported columns. Unsupported columns may have been created by means other than the Data API (e.g. CQL direct interaction with the database).

The Data API reports these columns when listing the tables and their metadata, and provides the information marshaled in this object to detail which level of support the column has (for instance, it can be a partial support where the column is readable by the API but not writable).

Attributes

cql_definition
a free-form string containing the CQL definition for the column.
create_table
whether a column of this nature can be used in API table creation.
insert
whether a column of this nature can be written through the API.
read
whether a column of this nature can be read through the API.
Expand source code
@dataclass
class TableAPISupportDescriptor:
    """
    Represents the additional information returned by the Data API when describing
    a table with unsupported columns. Unsupported columns may have been created by
    means other than the Data API (e.g. CQL direct interaction with the database).

    The Data API reports these columns when listing the tables and their metadata,
    and provides the information marshaled in this object to detail which level
    of support the column has (for instance, it can be a partial support where the
    column is readable by the API but not writable).

    Attributes:
        cql_definition: a free-form string containing the CQL definition for the column.
        create_table: whether a column of this nature can be used in API table creation.
        insert: whether a column of this nature can be written through the API.
        read: whether a column of this nature can be read through the API.
    """

    cql_definition: str
    create_table: bool
    insert: bool
    read: bool

    def __repr__(self) -> str:
        pieces = [
            f'"{self.cql_definition}"',
            f"create_table={self.create_table}",
            f"insert={self.insert}",
            f"read={self.read}",
        ]
        return f"{self.__class__.__name__}({', '.join(pieces)})"

    def as_dict(self) -> dict[str, Any]:
        """Recast this object into a dictionary."""

        return dict(
            cqlDefinition=self.cql_definition,
            createTable=self.create_table,
            insert=self.insert,
            read=self.read,
        )

    @classmethod
    def _from_dict(cls, raw_dict: dict[str, Any]) -> TableAPISupportDescriptor:
        """
        Create an instance of TableAPISupportDescriptor from a dictionary
        such as one from the Data API.
        """

        expected_keys = {"cqlDefinition", "createTable", "insert", "read"}
        _warn_residual_keys(cls, raw_dict, expected_keys)
        return TableAPISupportDescriptor(
            cql_definition=raw_dict["cqlDefinition"],
            create_table=raw_dict["createTable"],
            insert=raw_dict["insert"],
            read=raw_dict["read"],
        )

Class variables

var cql_definition : str
var create_table : bool
var insert : bool
var read : bool

Methods

def as_dict(self) ‑> dict[str, typing.Any]

Recast this object into a dictionary.

Expand source code
def as_dict(self) -> dict[str, Any]:
    """Recast this object into a dictionary."""

    return {
        "cqlDefinition": self.cql_definition,
        "createTable": self.create_table,
        "insert": self.insert,
        "read": self.read,
    }
class TableBaseIndexDefinition (column: str)

An object describing an index definition, including the name of the indexed column and the index options if there are any. This is an abstract class common to the various types of index: see the appropriate subclass for more details.

Attributes

column
the name of the indexed column.
Expand source code
@dataclass
class TableBaseIndexDefinition(ABC):
    """
    An object describing an index definition, including the name of the indexed
    column and the index options if there are any.
    This is an abstract class common to the various types of index:
    see the appropriate subclass for more details.

    Attributes:
        column: the name of the indexed column.
    """

    column: str

    @abstractmethod
    def as_dict(self) -> dict[str, Any]: ...

    @classmethod
    def _from_dict(cls, raw_input: dict[str, Any]) -> TableBaseIndexDefinition:
        """
        Create an instance of TableBaseIndexDefinition from a dictionary
        such as one from the Data API. This method inspects the input dictionary
        to select the right class to use so as to represent the index definition.
        """

        if "options" in raw_input:
            # a "metric" entry among the options marks a vector index
            if "metric" in raw_input["options"]:
                return TableVectorIndexDefinition.coerce(raw_input)
            return TableIndexDefinition.coerce(raw_input)
        # no options: either an API-unsupported index or a plain one
        if raw_input["column"] == "UNKNOWN" and "apiSupport" in raw_input:
            return TableUnsupportedIndexDefinition.coerce(raw_input)
        return TableIndexDefinition.coerce(raw_input)

Ancestors

  • abc.ABC

Subclasses

Class variables

var column : str

Methods

def as_dict(self) ‑> dict[str, typing.Any]
Expand source code
@abstractmethod
def as_dict(self) -> dict[str, Any]: ...
class TableIndexDefinition (column: str, options: TableIndexOptions | UnsetType = (unset))

An object describing a regular (non-vector) index definition, including the name of the indexed column and the index options.

Attributes

column
the name of the indexed column.
options
a TableIndexOptions detailing the index configuration.
Expand source code
@dataclass
class TableIndexDefinition(TableBaseIndexDefinition):
    """
    An object describing a regular (non-vector) index definition,
    including the name of the indexed column and the index options.

    Attributes:
        column: the name of the indexed column.
        options: a `TableIndexOptions` detailing the index configuration.
    """

    options: TableIndexOptions

    def __init__(
        self,
        column: str,
        options: TableIndexOptions | UnsetType = _UNSET,
    ) -> None:
        self.column = column
        if isinstance(options, UnsetType):
            # no options supplied: fall back to an all-default options object
            self.options = TableIndexOptions()
        else:
            self.options = options

    def __repr__(self) -> str:
        return f"{self.__class__.__name__}({self.column}, options={self.options})"

    def as_dict(self) -> dict[str, Any]:
        """Recast this object into a dictionary."""

        return {"column": self.column, "options": self.options.as_dict()}

    @classmethod
    def _from_dict(cls, raw_dict: dict[str, Any]) -> TableIndexDefinition:
        """
        Create an instance of TableIndexDefinition from a dictionary
        such as one from the Data API.
        """

        _warn_residual_keys(cls, raw_dict, {"column", "options"})
        return TableIndexDefinition(
            column=raw_dict["column"],
            options=TableIndexOptions.coerce(raw_dict["options"]),
        )

    @classmethod
    def coerce(
        cls, raw_input: TableIndexDefinition | dict[str, Any]
    ) -> TableIndexDefinition:
        """
        Normalize the input, whether an object already or a plain dictionary
        of the right structure, into a TableIndexDefinition.
        """

        if isinstance(raw_input, TableIndexDefinition):
            return raw_input
        # ensure an "options" key exists (possibly empty) before parsing
        filled_raw_input = {"options": {}, **raw_input}
        return cls._from_dict(filled_raw_input)

Ancestors

Class variables

var options : TableIndexOptions

Static methods

def coerce(raw_input: TableIndexDefinition | dict[str, Any]) ‑> TableIndexDefinition

Normalize the input, whether an object already or a plain dictionary of the right structure, into a TableIndexDefinition.

Expand source code
@classmethod
def coerce(
    cls, raw_input: TableIndexDefinition | dict[str, Any]
) -> TableIndexDefinition:
    """
    Normalize the input, whether an object already or a plain dictionary
    of the right structure, into a TableIndexDefinition.
    """

    if isinstance(raw_input, TableIndexDefinition):
        return raw_input
    else:
        _filled_raw_input = {**{"options": {}}, **raw_input}
        return cls._from_dict(_filled_raw_input)

Methods

def as_dict(self) ‑> dict[str, typing.Any]

Recast this object into a dictionary.

Expand source code
def as_dict(self) -> dict[str, Any]:
    """Recast this object into a dictionary."""

    return {
        "column": self.column,
        "options": self.options.as_dict(),
    }
class TableIndexDescriptor (name: str, definition: TableBaseIndexDefinition)

The top-level object describing a table index on a column.

The hierarchical arrangement of TableIndexDescriptor, which contains a TableBaseIndexDefinition (plus possibly index options within the latter), is designed to mirror the shape of payloads and response about indexes in the Data API.

Attributes

name
the name of the index. Index names are unique within a keyspace: hence, two tables in the same keyspace cannot use the same name for their indexes.
definition
an appropriate concrete subclass of TableBaseIndexDefinition providing the detailed definition of the index.
Expand source code
@dataclass
class TableIndexDescriptor:
    """
    The top-level object describing a table index on a column.

    The hierarchical arrangement of `TableIndexDescriptor`, which contains a
    `TableBaseIndexDefinition` (plus possibly index options within the latter),
    is designed to mirror the shape of payloads and response about indexes in the
    Data API.

    Attributes:
        name: the name of the index. Index names are unique within a keyspace: hence,
            two tables in the same keyspace cannot use the same name for their indexes.
        definition: an appropriate concrete subclass of `TableBaseIndexDefinition`
            providing the detailed definition of the index.
    """

    name: str
    definition: TableBaseIndexDefinition

    def as_dict(self) -> dict[str, Any]:
        """Recast this object into a dictionary."""

        return {
            "name": self.name,
            "definition": self.definition.as_dict(),
        }

    @classmethod
    def _from_dict(cls, raw_dict: dict[str, Any]) -> TableIndexDescriptor:
        """
        Create an instance of TableIndexDescriptor from a dictionary
        such as one from the Data API.
        """

        _warn_residual_keys(cls, raw_dict, {"name", "definition"})
        return TableIndexDescriptor(
            name=raw_dict["name"],
            definition=TableBaseIndexDefinition._from_dict(raw_dict["definition"]),
        )

    # NOTE: previously defined as a plain function (no decorator, first
    # parameter `raw_input`), which worked only when accessed on the class
    # and broke when called on an instance (the instance was bound as
    # `raw_input`). Made a proper classmethod, consistent with the `coerce`
    # of the sibling descriptor classes; class-level calls are unaffected.
    @classmethod
    def coerce(
        cls,
        raw_input: TableIndexDescriptor | dict[str, Any],
    ) -> TableIndexDescriptor:
        """
        Normalize the input, whether an object already or a plain dictionary
        of the right structure, into a TableIndexDescriptor.
        """

        if isinstance(raw_input, TableIndexDescriptor):
            return raw_input
        else:
            return cls._from_dict(raw_input)

Class variables

var definition : TableBaseIndexDefinition
var name : str

Methods

def as_dict(self) ‑> dict[str, typing.Any]

Recast this object into a dictionary.

Expand source code
def as_dict(self) -> dict[str, Any]:
    """Recast this object into a dictionary."""

    return {
        "name": self.name,
        "definition": self.definition.as_dict(),
    }
def coerce(raw_input: TableIndexDescriptor | dict[str, Any]) ‑> TableIndexDescriptor

Normalize the input, whether an object already or a plain dictionary of the right structure, into a TableIndexDescriptor.

Expand source code
def coerce(
    raw_input: TableIndexDescriptor | dict[str, Any],
) -> TableIndexDescriptor:
    """
    Normalize the input, whether an object already or a plain dictionary
    of the right structure, into a TableIndexDescriptor.
    """

    if isinstance(raw_input, TableIndexDescriptor):
        return raw_input
    else:
        return TableIndexDescriptor._from_dict(raw_input)
class TableIndexOptions (ascii: bool | UnsetType = (unset), normalize: bool | UnsetType = (unset), case_sensitive: bool | UnsetType = (unset))

An object describing the options for a table regular (non-vector) index.

Both when creating indexes and retrieving index metadata from the API, instances of TableIndexOptions are used to express the corresponding index settings.

Attributes

ascii
whether the index should convert to US-ASCII before indexing. It can be passed only for indexes on a TEXT or ASCII column.
normalize
whether the index should normalize Unicode and diacritics before indexing. It can be passed only for indexes on a TEXT or ASCII column.
case_sensitive
whether the index should index the input in a case-sensitive manner. It can be passed only for indexes on a TEXT or ASCII column.
Expand source code
@dataclass
class TableIndexOptions:
    """
    An object describing the options for a table regular (non-vector) index.

    Both when creating indexes and retrieving index metadata from the API, instances
    of TableIndexOptions are used to express the corresponding index settings.

    Attributes:
        ascii: whether the index should convert to US-ASCII before indexing.
            It can be passed only for indexes on a TEXT or ASCII column.
        normalize: whether the index should normalize Unicode and diacritics before
            indexing. It can be passed only for indexes on a TEXT or ASCII column.
        case_sensitive: whether the index should index the input in a case-sensitive
            manner. It can be passed only for indexes on a TEXT or ASCII column.
    """

    ascii: bool | UnsetType = _UNSET
    normalize: bool | UnsetType = _UNSET
    case_sensitive: bool | UnsetType = _UNSET

    def __repr__(self) -> str:
        # Each attribute is tested against *its own* unset state (this used to
        # erroneously check `self.ascii` for all three pieces, hiding
        # `normalize` and `case_sensitive` from the repr whenever `ascii`
        # was left unset).
        not_null_pieces = [
            pc
            for pc in (
                None if isinstance(self.ascii, UnsetType) else f"ascii={self.ascii}",
                None
                if isinstance(self.normalize, UnsetType)
                else f"normalize={self.normalize}",
                None
                if isinstance(self.case_sensitive, UnsetType)
                else f"case_sensitive={self.case_sensitive}",
            )
            if pc is not None
        ]
        inner_desc = ", ".join(not_null_pieces)
        return f"{self.__class__.__name__}({inner_desc})"

    def as_dict(self) -> dict[str, Any]:
        """Recast this object into a dictionary (unset options are omitted)."""

        return {
            k: v
            for k, v in {
                "ascii": None if isinstance(self.ascii, UnsetType) else self.ascii,
                "normalize": None
                if isinstance(self.normalize, UnsetType)
                else self.normalize,
                "caseSensitive": None
                if isinstance(self.case_sensitive, UnsetType)
                else self.case_sensitive,
            }.items()
            if v is not None
        }

    @classmethod
    def _from_dict(cls, raw_dict: dict[str, Any]) -> TableIndexOptions:
        """
        Create an instance of TableIndexOptions from a dictionary
        such as one from the Data API.
        """

        _warn_residual_keys(cls, raw_dict, {"ascii", "normalize", "caseSensitive"})
        # missing/None entries are mapped back to the 'unset' sentinel
        return TableIndexOptions(
            ascii=raw_dict["ascii"] if raw_dict.get("ascii") is not None else _UNSET,
            normalize=raw_dict["normalize"]
            if raw_dict.get("normalize") is not None
            else _UNSET,
            case_sensitive=raw_dict["caseSensitive"]
            if raw_dict.get("caseSensitive") is not None
            else _UNSET,
        )

    @classmethod
    def coerce(cls, raw_input: TableIndexOptions | dict[str, Any]) -> TableIndexOptions:
        """
        Normalize the input, whether an object already or a plain dictionary
        of the right structure, into a TableIndexOptions.
        """

        if isinstance(raw_input, TableIndexOptions):
            return raw_input
        else:
            return cls._from_dict(raw_input)

Class variables

var ascii : bool | UnsetType
var case_sensitive : bool | UnsetType
var normalize : bool | UnsetType

Static methods

def coerce(raw_input: TableIndexOptions | dict[str, Any]) ‑> TableIndexOptions

Normalize the input, whether an object already or a plain dictionary of the right structure, into a TableIndexOptions.

Expand source code
@classmethod
def coerce(cls, raw_input: TableIndexOptions | dict[str, Any]) -> TableIndexOptions:
    """
    Normalize the input, whether an object already or a plain dictionary
    of the right structure, into a TableIndexOptions.
    """

    if isinstance(raw_input, TableIndexOptions):
        return raw_input
    else:
        return cls._from_dict(raw_input)

Methods

def as_dict(self) ‑> dict[str, typing.Any]

Recast this object into a dictionary.

Expand source code
def as_dict(self) -> dict[str, Any]:
    """Recast this object into a dictionary."""

    return {
        k: v
        for k, v in {
            "ascii": None if isinstance(self.ascii, UnsetType) else self.ascii,
            "normalize": None
            if isinstance(self.normalize, UnsetType)
            else self.normalize,
            "caseSensitive": None
            if isinstance(self.case_sensitive, UnsetType)
            else self.case_sensitive,
        }.items()
        if v is not None
    }
class TableInfo (database_info: AstraDBDatabaseInfo, keyspace: str, name: str, full_name: str)

Represents the identifying information for a table, including the information about the database the table belongs to.

Attributes

database_info
an AstraDBDatabaseInfo instance for the underlying database.
keyspace
the keyspace where the table is located.
name
table name. Unique within a keyspace (across tables/collections).
full_name
identifier for the table within the database, in the form "keyspace.table_name".
Expand source code
@dataclass
class TableInfo:
    """
    Represents the identifying information for a table,
    including the information about the database the table belongs to.

    Attributes:
        database_info: an AstraDBDatabaseInfo instance for the underlying database.
        keyspace: the keyspace where the table is located.
        name: table name. Unique within a keyspace (across tables/collections).
        full_name: identifier for the table within the database,
            in the form "keyspace.table_name".
    """

    # Database-level metadata for the database hosting this table.
    database_info: AstraDBDatabaseInfo
    # Keyspace (namespace) containing the table.
    keyspace: str
    # Table name, unique within its keyspace.
    name: str
    # Fully-qualified "keyspace.table_name" identifier.
    full_name: str

Class variables

var database_info : AstraDBDatabaseInfo
var full_name : str
var keyspace : str
var name : str
class TableKeyValuedColumnType (*args, **kwds)

An enum to describe the types of column with "keys and values".

Expand source code
class TableKeyValuedColumnType(StrEnum):
    """
    An enum to describe the types of column with "keys and values".
    """

    # The only key-valued column kind defined here: an associative map.
    MAP = "map"

Ancestors

Class variables

var MAP

Inherited members

class TableKeyValuedColumnTypeDescriptor (*, column_type: str | TableKeyValuedColumnType, value_type: str | ColumnType, key_type: str | ColumnType)

Represents and describes a column in a Table, of a 'key-value' type, that stores an associative map (essentially a dict) between keys of a certain scalar type and values of a certain scalar type. The only such kind of column is a "map".

Attributes

column_type
an instance of TableKeyValuedColumnType. When creating the object, this can be omitted as it only ever assumes the "MAP" value.
key_type
the type of the individual keys in the map column. This is a ColumnType, but when creating the object, strings such as "TEXT" or "UUID" are also accepted.
value_type
the type of the individual values stored in the map for a single key. This is a ColumnType, but when creating the object, strings such as "TEXT" or "UUID" are also accepted.
Expand source code
@dataclass
class TableKeyValuedColumnTypeDescriptor(TableColumnTypeDescriptor):
    """
    Represents and describes a column in a Table, of a 'key-value' type, that stores
    an associative map (essentially a dict) between keys of a certain scalar type and
    values of a certain scalar type. The only such kind of column is a "map".

    Attributes:
        column_type: an instance of `TableKeyValuedColumnType`. When creating the
            object, this can be omitted as it only ever assumes the "MAP" value.
        key_type: the type of the individual keys in the map column.
            This is a `ColumnType`, but when creating the object,
            strings such as "TEXT" or "UUID" are also accepted.
        value_type: the type of the individual values stored in the map for a single key.
            This is a `ColumnType`, but when creating the object,
            strings such as "TEXT" or "UUID" are also accepted.
    """

    column_type: TableKeyValuedColumnType
    key_type: ColumnType
    value_type: ColumnType

    def __init__(
        self,
        *,
        column_type: str | TableKeyValuedColumnType,
        value_type: str | ColumnType,
        key_type: str | ColumnType,
    ) -> None:
        # normalize key/value scalar types first, then delegate to the base class
        self.key_type = ColumnType.coerce(key_type)
        self.value_type = ColumnType.coerce(value_type)
        super().__init__(column_type=TableKeyValuedColumnType.coerce(column_type))

    def __repr__(self) -> str:
        type_desc = f"{self.column_type}<{self.key_type},{self.value_type}>"
        return f"{self.__class__.__name__}({type_desc})"

    def as_dict(self) -> dict[str, Any]:
        """Recast this object into a dictionary."""

        return {
            "type": self.column_type.value,
            "keyType": self.key_type.value,
            "valueType": self.value_type.value,
        }

    @classmethod
    def _from_dict(cls, raw_dict: dict[str, Any]) -> TableKeyValuedColumnTypeDescriptor:
        """
        Create an instance of TableKeyValuedColumnTypeDescriptor from a dictionary
        such as one from the Data API.
        """

        _warn_residual_keys(cls, raw_dict, {"type", "keyType", "valueType"})
        return TableKeyValuedColumnTypeDescriptor(
            column_type=raw_dict["type"],
            key_type=raw_dict["keyType"],
            value_type=raw_dict["valueType"],
        )

Ancestors

Class variables

var column_type : TableKeyValuedColumnType
var key_type : ColumnType
var value_type : ColumnType

Methods

def as_dict(self) ‑> dict[str, typing.Any]

Recast this object into a dictionary.

Expand source code
def as_dict(self) -> dict[str, Any]:
    """Recast this object into a dictionary."""

    return {
        "type": self.column_type.value,
        "keyType": self.key_type.value,
        "valueType": self.value_type.value,
    }

Inherited members

class TablePrimaryKeyDescriptor (partition_by: list[str], partition_sort: dict[str, int])

Represents the part of a table definition that describes the primary key.

Attributes

partition_by
a list of column names forming the partition key, i.e. the portion of primary key that determines physical grouping and storage of rows on the database. Rows with the same values for the partition_by columns are guaranteed to be stored next to each other. This list cannot be empty.
partition_sort
this defines how rows are to be sorted within a partition. It is a dictionary that specifies, for each column of the primary key not in the partition_by field, whether the sorting is ascending or descending (see the values in the SortMode constant). The sorting within a partition considers all columns in this dictionary, in a hierarchical way: hence, ordering in this dictionary is relevant.
Expand source code
@dataclass
class TablePrimaryKeyDescriptor:
    """
    Represents the part of a table definition that describes the primary key.

    Attributes:
        partition_by: a list of column names forming the partition key, i.e.
            the portion of primary key that determines physical grouping and storage
            of rows on the database. Rows with the same values for the partition_by
            columns are guaranteed to be stored next to each other. This list
            cannot be empty.
        partition_sort: this defines how rows are to be sorted within a partition.
            It is a dictionary that specifies, for each column of the primary key
            not in the `partition_by` field, whether the sorting is ascending
            or descending (see the values in the `SortMode` constant).
            The sorting within a partition considers all columns in this dictionary,
            in a hierarchical way: hence, ordering in this dictionary is relevant.
    """

    partition_by: list[str]
    partition_sort: dict[str, int]

    def __repr__(self) -> str:
        # Rendered as e.g. "ClassName[(pcol1,pcol2)scol1:a,scol2:d]",
        # with 'a'/'d' marking ascending (positive) vs descending sort.
        partition_part = ",".join(self.partition_by)
        sort_part = ",".join(
            "{}:{}".format(col_name, "a" if col_sort > 0 else "d")
            for col_name, col_sort in self.partition_sort.items()
        )
        return f"{self.__class__.__name__}[({partition_part}){sort_part}]"

    def as_dict(self) -> dict[str, Any]:
        """Recast this object into a dictionary."""

        full_item: dict[str, Any] = {
            "partitionBy": self.partition_by,
            "partitionSort": {**self.partition_sort},
        }
        # None-valued entries are omitted from the result
        return {key: value for key, value in full_item.items() if value is not None}

    @classmethod
    def _from_dict(cls, raw_dict: dict[str, Any]) -> TablePrimaryKeyDescriptor:
        """
        Create an instance of TablePrimaryKeyDescriptor from a dictionary
        such as one from the Data API.
        """

        _warn_residual_keys(cls, raw_dict, {"partitionBy", "partitionSort"})
        return TablePrimaryKeyDescriptor(
            partition_by=raw_dict["partitionBy"],
            partition_sort=raw_dict["partitionSort"],
        )

    @classmethod
    def coerce(
        cls, raw_input: TablePrimaryKeyDescriptor | dict[str, Any] | str
    ) -> TablePrimaryKeyDescriptor:
        """
        Normalize the input, whether an object already or a plain dictionary
        of the right structure, into a TablePrimaryKeyDescriptor.
        """

        if isinstance(raw_input, TablePrimaryKeyDescriptor):
            return raw_input
        if isinstance(raw_input, str):
            # a bare string is shorthand for a single-column partition key
            return cls._from_dict({"partitionBy": [raw_input], "partitionSort": {}})
        return cls._from_dict(raw_input)

Class variables

var partition_by : list[str]
var partition_sort : dict[str, int]

Static methods

def coerce(raw_input: TablePrimaryKeyDescriptor | dict[str, Any] | str) ‑> TablePrimaryKeyDescriptor

Normalize the input, whether an object already or a plain dictionary of the right structure, into a TablePrimaryKeyDescriptor.

Expand source code
@classmethod
def coerce(
    cls, raw_input: TablePrimaryKeyDescriptor | dict[str, Any] | str
) -> TablePrimaryKeyDescriptor:
    """
    Normalize the input, whether an object already or a plain dictionary
    of the right structure, into a TablePrimaryKeyDescriptor.
    """

    if isinstance(raw_input, TablePrimaryKeyDescriptor):
        return raw_input
    elif isinstance(raw_input, str):
        return cls._from_dict({"partitionBy": [raw_input], "partitionSort": {}})
    else:
        return cls._from_dict(raw_input)

Methods

def as_dict(self) ‑> dict[str, typing.Any]

Recast this object into a dictionary.

Expand source code
def as_dict(self) -> dict[str, Any]:
    """Recast this object into a dictionary."""

    return {
        k: v
        for k, v in {
            "partitionBy": self.partition_by,
            "partitionSort": dict(self.partition_sort.items()),
        }.items()
        if v is not None
    }
class TableScalarColumnTypeDescriptor (column_type: str | ColumnType)

Represents and describes a column in a Table, of scalar type, i.e. which contains a single simple value.

Attributes

column_type
a ColumnType value. When creating the object, simple strings such as "TEXT" or "UUID" are also accepted.
Expand source code
@dataclass
class TableScalarColumnTypeDescriptor(TableColumnTypeDescriptor):
    """
    Represents and describes a column in a Table, of scalar type, i.e. which
    contains a single simple value.

    Attributes:
        column_type: a `ColumnType` value. When creating the object,
            simple strings such as "TEXT" or "UUID" are also accepted.
    """

    column_type: ColumnType

    def __init__(self, column_type: str | ColumnType) -> None:
        # normalize plain strings ("TEXT", ...) into ColumnType members
        self.column_type = ColumnType.coerce(column_type)

    def __repr__(self) -> str:
        return f"{self.__class__.__name__}({self.column_type})"

    def as_dict(self) -> dict[str, Any]:
        """Recast this object into a dictionary."""

        return {"type": self.column_type.value}

    @classmethod
    def _from_dict(cls, raw_dict: dict[str, Any]) -> TableScalarColumnTypeDescriptor:
        """
        Create an instance of TableScalarColumnTypeDescriptor from a dictionary
        such as one from the Data API.
        """

        _warn_residual_keys(cls, raw_dict, {"type"})
        return TableScalarColumnTypeDescriptor(column_type=raw_dict["type"])

Ancestors

Class variables

var column_type : ColumnType

Methods

def as_dict(self) ‑> dict[str, typing.Any]

Recast this object into a dictionary.

Expand source code
def as_dict(self) -> dict[str, Any]:
    """Recast this object into a dictionary."""

    return {
        "type": self.column_type.value,
    }

Inherited members

class TableUnsupportedColumnTypeDescriptor (*, column_type: TableUnsupportedColumnType | str, api_support: TableAPISupportDescriptor)

Represents and describes a column in a Table, of unsupported type.

Note that this column type descriptor cannot be used in table creation, rather it can only be returned when listing the tables or getting their metadata by the API.

Attributes

column_type
an instance of TableUnsupportedColumnType.
api_support
a TableAPISupportDescriptor object giving more details.

This class has no coerce method, since it is always only found in API responses.

Expand source code
@dataclass
class TableUnsupportedColumnTypeDescriptor(TableColumnTypeDescriptor):
    """
    Represents and describes a column in a Table, of unsupported type.

    Note that this column type descriptor cannot be used in table creation,
    rather it can only be returned when listing the tables or getting their
    metadata by the API.

    Attributes:
        column_type: an instance of `TableUnsupportedColumnType`.
        api_support: a `TableAPISupportDescriptor` object giving more details.

    This class has no `coerce` method, since it is always only found in API responses.
    """

    column_type: TableUnsupportedColumnType
    api_support: TableAPISupportDescriptor

    def __init__(
        self,
        *,
        column_type: TableUnsupportedColumnType | str,
        api_support: TableAPISupportDescriptor,
    ) -> None:
        self.api_support = api_support
        # the parent initializer stores the (normalized) column type
        super().__init__(column_type=TableUnsupportedColumnType.coerce(column_type))

    def __repr__(self) -> str:
        return f"{self.__class__.__name__}({self.api_support.cql_definition})"

    def as_dict(self) -> dict[str, Any]:
        """Recast this object into a dictionary."""

        return {
            "type": self.column_type.value,
            "apiSupport": self.api_support.as_dict(),
        }

    @classmethod
    def _from_dict(
        cls, raw_dict: dict[str, Any]
    ) -> TableUnsupportedColumnTypeDescriptor:
        """
        Create an instance of TableUnsupportedColumnTypeDescriptor from a
        dictionary such as one from the Data API.
        """

        _warn_residual_keys(cls, raw_dict, {"type", "apiSupport"})
        parsed_support = TableAPISupportDescriptor._from_dict(raw_dict["apiSupport"])
        return TableUnsupportedColumnTypeDescriptor(
            column_type=raw_dict["type"],
            api_support=parsed_support,
        )

Ancestors

Class variables

var api_supportTableAPISupportDescriptor
var column_typeTableUnsupportedColumnType

Methods

def as_dict(self) ‑> dict[str, typing.Any]

Recast this object into a dictionary.

Expand source code
def as_dict(self) -> dict[str, Any]:
    """Recast this object into a dictionary."""

    return {
        "type": self.column_type.value,
        "apiSupport": self.api_support.as_dict(),
    }

Inherited members

class TableUnsupportedIndexDefinition (column: str, api_support: TableAPIIndexSupportDescriptor)

An object describing the definition of an unsupported index found on a table, including the name of the indexed column and the index support status.

Attributes

column
the name of the indexed column.
api_support
a TableAPIIndexSupportDescriptor detailing the level of support for the index by the Data API.
Expand source code
@dataclass
class TableUnsupportedIndexDefinition(TableBaseIndexDefinition):
    """
    An object describing the definition of an unsupported index found on a table,
    including the name of the indexed column and the index support status.

    Attributes:
        column: the name of the indexed column.
        api_support: a `TableAPIIndexSupportDescriptor` detailing the level of support
            for the index by the Data API.
    """

    api_support: TableAPIIndexSupportDescriptor

    def __repr__(self) -> str:
        return f"{self.__class__.__name__}({self.api_support.cql_definition})"

    def as_dict(self) -> dict[str, Any]:
        """Recast this object into a dictionary."""

        return {"column": self.column, "apiSupport": self.api_support.as_dict()}

    @classmethod
    def _from_dict(cls, raw_dict: dict[str, Any]) -> TableUnsupportedIndexDefinition:
        """
        Create an instance of TableUnsupportedIndexDefinition from a dictionary
        such as one from the Data API.
        """

        _warn_residual_keys(cls, raw_dict, {"column", "apiSupport"})
        parsed_support = TableAPIIndexSupportDescriptor._from_dict(
            raw_dict["apiSupport"]
        )
        return TableUnsupportedIndexDefinition(
            column=raw_dict["column"],
            api_support=parsed_support,
        )

    @classmethod
    def coerce(
        cls, raw_input: TableUnsupportedIndexDefinition | dict[str, Any]
    ) -> TableUnsupportedIndexDefinition:
        """
        Normalize the input, whether an object already or a plain dictionary
        of the right structure, into a TableUnsupportedIndexDefinition.
        """

        if isinstance(raw_input, TableUnsupportedIndexDefinition):
            return raw_input
        return cls._from_dict(raw_input)

Ancestors

Class variables

var api_supportTableAPIIndexSupportDescriptor

Static methods

def coerce(raw_input: TableUnsupportedIndexDefinition | dict[str, Any]) ‑> TableUnsupportedIndexDefinition

Normalize the input, whether an object already or a plain dictionary of the right structure, into a TableUnsupportedIndexDefinition.

Expand source code
@classmethod
def coerce(
    cls, raw_input: TableUnsupportedIndexDefinition | dict[str, Any]
) -> TableUnsupportedIndexDefinition:
    """
    Normalize the input, whether an object already or a plain dictionary
    of the right structure, into a TableUnsupportedIndexDefinition.
    """

    if isinstance(raw_input, TableUnsupportedIndexDefinition):
        return raw_input
    else:
        return cls._from_dict(raw_input)

Methods

def as_dict(self) ‑> dict[str, typing.Any]

Recast this object into a dictionary.

Expand source code
def as_dict(self) -> dict[str, Any]:
    """Recast this object into a dictionary."""

    return {
        "column": self.column,
        "apiSupport": self.api_support.as_dict(),
    }
class TableValuedColumnType (*args, **kwds)

An enum to describe the types of column with "values".

Expand source code
class TableValuedColumnType(StrEnum):
    """
    An enum to describe the types of column with "values".
    """

    # maps to the Data API "list" column type
    LIST = "list"
    # maps to the Data API "set" column type
    SET = "set"

Ancestors

Class variables

var LIST
var SET

Inherited members

class TableValuedColumnTypeDescriptor (*, column_type: str | TableValuedColumnType, value_type: str | ColumnType)

Represents and describes a column in a Table, of a 'valued' type that stores multiple values. This means either a list or a set of homogeneous items.

Attributes

column_type
an instance of TableValuedColumnType. When creating the object, simple strings such as "list" or "set" are also accepted.
value_type
the type of the individual items stored in the column. This is a ColumnType, but when creating the object, strings such as "TEXT" or "UUID" are also accepted.
Expand source code
@dataclass
class TableValuedColumnTypeDescriptor(TableColumnTypeDescriptor):
    """
    Represents and describes a column in a Table, of a 'valued' type that stores
    multiple values. This means either a list or a set of homogeneous items.

    Attributes:
        column_type: an instance of `TableValuedColumnType`. When creating the
            object, simple strings such as "list" or "set" are also accepted.
        value_type: the type of the individual items stored in the column.
            This is a `ColumnType`, but when creating the object,
            strings such as "TEXT" or "UUID" are also accepted.
    """

    column_type: TableValuedColumnType
    value_type: ColumnType

    def __init__(
        self,
        *,
        column_type: str | TableValuedColumnType,
        value_type: str | ColumnType,
    ) -> None:
        # normalize the item type first, then let the parent store the
        # (normalized) container type
        self.value_type = ColumnType.coerce(value_type)
        super().__init__(column_type=TableValuedColumnType.coerce(column_type))

    def __repr__(self) -> str:
        return f"{self.__class__.__name__}({self.column_type}<{self.value_type}>)"

    def as_dict(self) -> dict[str, Any]:
        """Recast this object into a dictionary."""

        return {"type": self.column_type.value, "valueType": self.value_type.value}

    @classmethod
    def _from_dict(cls, raw_dict: dict[str, Any]) -> TableValuedColumnTypeDescriptor:
        """
        Create an instance of TableValuedColumnTypeDescriptor from a dictionary
        such as one from the Data API.
        """

        _warn_residual_keys(cls, raw_dict, {"type", "valueType"})
        return TableValuedColumnTypeDescriptor(
            column_type=raw_dict["type"],
            value_type=raw_dict["valueType"],
        )

Ancestors

Class variables

var column_typeTableValuedColumnType
var value_typeColumnType

Methods

def as_dict(self) ‑> dict[str, typing.Any]

Recast this object into a dictionary.

Expand source code
def as_dict(self) -> dict[str, Any]:
    """Recast this object into a dictionary."""

    return {
        "type": self.column_type.value,
        "valueType": self.value_type.value,
    }

Inherited members

class TableVectorColumnTypeDescriptor (*, column_type: str | TableVectorColumnType = TableVectorColumnType.VECTOR, dimension: int | None, service: VectorServiceOptions | None = None)

Represents and describes a column in a Table, of vector type, i.e. which contains a list of dimension floats that is treated specially as a "vector".

Attributes

column_type
a TableVectorColumnType value. This can be omitted when creating the object. It only ever assumes the "VECTOR" value.
dimension
an integer, the number of components (numbers) in the vectors. This can be left unspecified in some cases of vectorize-enabled columns.
service
an optional VectorServiceOptions object defining the vectorize settings (i.e. server-side embedding computation) for the column.
Expand source code
@dataclass
class TableVectorColumnTypeDescriptor(TableColumnTypeDescriptor):
    """
    Represents and describes a column in a Table, of vector type, i.e. which contains
    a list of `dimension` floats that is treated specially as a "vector".

    Attributes:
        column_type: a `TableVectorColumnType` value. This can be omitted when
            creating the object. It only ever assumes the "VECTOR" value.
        dimension: an integer, the number of components (numbers) in the vectors.
            This can be left unspecified in some cases of vectorize-enabled columns.
        service: an optional `VectorServiceOptions` object defining the vectorize
            settings (i.e. server-side embedding computation) for the column.
    """

    column_type: TableVectorColumnType
    dimension: int | None
    service: VectorServiceOptions | None

    def __init__(
        self,
        *,
        column_type: str | TableVectorColumnType = TableVectorColumnType.VECTOR,
        dimension: int | None,
        service: VectorServiceOptions | None = None,
    ) -> None:
        self.dimension = dimension
        self.service = service
        # the parent initializer stores the (normalized) column type
        super().__init__(column_type=TableVectorColumnType.coerce(column_type))

    def __repr__(self) -> str:
        # only mention the attributes that are actually set
        pieces: list[str] = []
        if self.dimension is not None:
            pieces.append(f"dimension={self.dimension}")
        if self.service is not None:
            pieces.append(f"service={self.service}")
        inner_desc = ", ".join(pieces)
        return f"{self.__class__.__name__}({self.column_type}[{inner_desc}])"

    def as_dict(self) -> dict[str, Any]:
        """Recast this object into a dictionary."""

        full_item = {
            "type": self.column_type.value,
            "dimension": self.dimension,
            "service": None if self.service is None else self.service.as_dict(),
        }
        # None-valued entries are omitted from the result
        return {key: value for key, value in full_item.items() if value is not None}

    @classmethod
    def _from_dict(cls, raw_dict: dict[str, Any]) -> TableVectorColumnTypeDescriptor:
        """
        Create an instance of TableVectorColumnTypeDescriptor from a dictionary
        such as one from the Data API.
        """

        _warn_residual_keys(cls, raw_dict, {"type", "dimension", "service"})
        return TableVectorColumnTypeDescriptor(
            column_type=raw_dict["type"],
            dimension=raw_dict.get("dimension"),
            service=VectorServiceOptions.coerce(raw_dict.get("service")),
        )

Ancestors

Class variables

var column_typeTableVectorColumnType
var dimension : int | None
var serviceVectorServiceOptions | None

Methods

def as_dict(self) ‑> dict[str, typing.Any]

Recast this object into a dictionary.

Expand source code
def as_dict(self) -> dict[str, Any]:
    """Recast this object into a dictionary."""

    return {
        k: v
        for k, v in {
            "type": self.column_type.value,
            "dimension": self.dimension,
            "service": None if self.service is None else self.service.as_dict(),
        }.items()
        if v is not None
    }

Inherited members

class TableVectorIndexDefinition (column: str, options: TableVectorIndexOptions)

An object describing a vector index definition, including the name of the indexed column and the index options.

Attributes

column
the name of the indexed column.
options
a TableVectorIndexOptions detailing the index configuration.
Expand source code
@dataclass
class TableVectorIndexDefinition(TableBaseIndexDefinition):
    """
    An object describing a vector index definition,
    including the name of the indexed column and the index options.

    Attributes:
        column: the name of the indexed column.
        options: a `TableVectorIndexOptions` detailing the index configuration.
    """

    options: TableVectorIndexOptions

    def __repr__(self) -> str:
        return f"{self.__class__.__name__}({self.column}, options={self.options})"

    def as_dict(self) -> dict[str, Any]:
        """Recast this object into a dictionary."""

        return {"column": self.column, "options": self.options.as_dict()}

    @classmethod
    def _from_dict(cls, raw_dict: dict[str, Any]) -> TableVectorIndexDefinition:
        """
        Create an instance of TableVectorIndexDefinition from a dictionary
        such as one from the Data API.
        """

        _warn_residual_keys(cls, raw_dict, {"column", "options"})
        return TableVectorIndexDefinition(
            column=raw_dict["column"],
            options=TableVectorIndexOptions.coerce(raw_dict["options"]),
        )

    @classmethod
    def coerce(
        cls, raw_input: TableVectorIndexDefinition | dict[str, Any]
    ) -> TableVectorIndexDefinition:
        """
        Normalize the input, whether an object already or a plain dictionary
        of the right structure, into a TableVectorIndexDefinition.
        """

        if isinstance(raw_input, TableVectorIndexDefinition):
            return raw_input
        # guarantee an "options" key is present, defaulting to an empty dict
        return cls._from_dict({"options": {}, **raw_input})

Ancestors

Class variables

var optionsTableVectorIndexOptions

Static methods

def coerce(raw_input: TableVectorIndexDefinition | dict[str, Any]) ‑> TableVectorIndexDefinition

Normalize the input, whether an object already or a plain dictionary of the right structure, into a TableVectorIndexDefinition.

Expand source code
@classmethod
def coerce(
    cls, raw_input: TableVectorIndexDefinition | dict[str, Any]
) -> TableVectorIndexDefinition:
    """
    Normalize the input, whether an object already or a plain dictionary
    of the right structure, into a TableVectorIndexDefinition.
    """

    if isinstance(raw_input, TableVectorIndexDefinition):
        return raw_input
    else:
        _filled_raw_input = {**{"options": {}}, **raw_input}
        return cls._from_dict(_filled_raw_input)

Methods

def as_dict(self) ‑> dict[str, typing.Any]

Recast this object into a dictionary.

Expand source code
def as_dict(self) -> dict[str, Any]:
    """Recast this object into a dictionary."""

    return {
        "column": self.column,
        "options": self.options.as_dict(),
    }
class TableVectorIndexOptions (metric: str | UnsetType = (unset), source_model: str | UnsetType = (unset))

An object describing the options for a table vector index, which is the index that enables vector (ANN) search on a column.

Both when creating indexes and retrieving index metadata from the API, instances of TableVectorIndexOptions are used to express the corresponding index settings.

Attributes

metric
the similarity metric used in the index. It must be one of the strings defined in VectorMetric (such as "dot_product").
source_model
an optional parameter to help the index pick the set of parameters best suited to a specific embedding model. If omitted, the Data API will use its defaults. See the Data API documentation for more details.
Expand source code
@dataclass
class TableVectorIndexOptions:
    """
    An object describing the options for a table vector index, which is the index
    that enables vector (ANN) search on a column.

    Both when creating indexes and retrieving index metadata from the API,
    instances of TableVectorIndexOptions are used to express the corresponding
    index settings.

    Attributes:
        metric: the similarity metric used in the index. It must be one of the strings
            defined in `astrapy.constants.VectorMetric` (such as "dot_product").
        source_model: an optional parameter to help the index pick the set of
            parameters best suited to a specific embedding model. If omitted, the Data
            API will use its defaults. See the Data API documentation for more details.
    """

    metric: str | UnsetType = _UNSET
    source_model: str | UnsetType = _UNSET

    def __repr__(self) -> str:
        # only mention the attributes that are actually set
        pieces: list[str] = []
        if not isinstance(self.metric, UnsetType):
            pieces.append(f"metric={self.metric}")
        if not isinstance(self.source_model, UnsetType):
            pieces.append(f"source_model={self.source_model}")
        return f"{self.__class__.__name__}({', '.join(pieces)})"

    def as_dict(self) -> dict[str, Any]:
        """Recast this object into a dictionary."""

        full_item = {
            "metric": None if isinstance(self.metric, UnsetType) else self.metric,
            "sourceModel": None
            if isinstance(self.source_model, UnsetType)
            else self.source_model,
        }
        # None-valued (i.e. unset) entries are omitted from the result
        return {key: value for key, value in full_item.items() if value is not None}

    @classmethod
    def _from_dict(cls, raw_dict: dict[str, Any]) -> TableVectorIndexOptions:
        """
        Create an instance of TableVectorIndexOptions from a dictionary
        such as one from the Data API.
        """

        _warn_residual_keys(cls, raw_dict, {"metric", "sourceModel"})
        metric = raw_dict.get("metric")
        source_model = raw_dict.get("sourceModel")
        return TableVectorIndexOptions(
            metric=_UNSET if metric is None else metric,
            source_model=_UNSET if source_model is None else source_model,
        )

    @classmethod
    def coerce(
        cls, raw_input: TableVectorIndexOptions | dict[str, Any] | None
    ) -> TableVectorIndexOptions:
        """
        Normalize the input, whether an object already or a plain dictionary
        of the right structure, into a TableVectorIndexOptions.
        """

        if isinstance(raw_input, TableVectorIndexOptions):
            return raw_input
        if raw_input is None:
            return cls(metric=_UNSET, source_model=_UNSET)
        return cls._from_dict(raw_input)

Class variables

var metric : str | UnsetType
var source_model : str | UnsetType

Static methods

def coerce(raw_input: TableVectorIndexOptions | dict[str, Any] | None) ‑> TableVectorIndexOptions

Normalize the input, whether an object already or a plain dictionary of the right structure, into a TableVectorIndexOptions.

Expand source code
@classmethod
def coerce(
    cls, raw_input: TableVectorIndexOptions | dict[str, Any] | None
) -> TableVectorIndexOptions:
    """
    Normalize the input, whether an object already or a plain dictionary
    of the right structure, into a TableVectorIndexOptions.
    """

    if isinstance(raw_input, TableVectorIndexOptions):
        return raw_input
    elif raw_input is None:
        return cls(metric=_UNSET, source_model=_UNSET)
    else:
        return cls._from_dict(raw_input)

Methods

def as_dict(self) ‑> dict[str, typing.Any]

Recast this object into a dictionary.

Expand source code
def as_dict(self) -> dict[str, Any]:
    """Recast this object into a dictionary."""

    return {
        k: v
        for k, v in {
            "metric": None if isinstance(self.metric, UnsetType) else self.metric,
            "sourceModel": None
            if isinstance(self.source_model, UnsetType)
            else self.source_model,
        }.items()
        if v is not None
    }
class VectorServiceOptions (provider: str | None, model_name: str | None, authentication: dict[str, Any] | None = None, parameters: dict[str, Any] | None = None)

The "vector.service" component of the collection options. See the Data API specifications for allowed values.

Attributes

provider
the name of a service provider for embedding calculation.
model_name
the name of a specific model for use by the service.
authentication
a key-value dictionary for the "authentication" specification, if any, in the vector service options.
parameters
a key-value dictionary for the "parameters" specification, if any, in the vector service options.
Expand source code
@dataclass
class VectorServiceOptions:
    """
    The "vector.service" component of the collection options.
    See the Data API specifications for allowed values.

    Attributes:
        provider: the name of a service provider for embedding calculation.
        model_name: the name of a specific model for use by the service.
        authentication: a key-value dictionary for the "authentication" specification,
            if any, in the vector service options.
        parameters: a key-value dictionary for the "parameters" specification, if any,
            in the vector service options.
    """

    provider: str | None
    model_name: str | None
    authentication: dict[str, Any] | None = None
    parameters: dict[str, Any] | None = None

    def as_dict(self) -> dict[str, Any]:
        """Recast this object into a dictionary."""

        full_item = {
            "provider": self.provider,
            "modelName": self.model_name,
            "authentication": self.authentication,
            "parameters": self.parameters,
        }
        # None-valued entries are omitted from the result
        return {key: value for key, value in full_item.items() if value is not None}

    @staticmethod
    def _from_dict(
        raw_dict: dict[str, Any] | None,
    ) -> VectorServiceOptions | None:
        """
        Create an instance of VectorServiceOptions from a dictionary
        such as one from the Data API. A None input yields None.
        """

        if raw_dict is None:
            return None
        return VectorServiceOptions(
            provider=raw_dict.get("provider"),
            model_name=raw_dict.get("modelName"),
            authentication=raw_dict.get("authentication"),
            parameters=raw_dict.get("parameters"),
        )

    @staticmethod
    def coerce(
        raw_input: VectorServiceOptions | dict[str, Any] | None,
    ) -> VectorServiceOptions | None:
        # pass objects through untouched; dictionaries (and None) go
        # through the _from_dict parsing path
        if isinstance(raw_input, VectorServiceOptions):
            return raw_input
        return VectorServiceOptions._from_dict(raw_input)

Class variables

var authentication : dict[str, typing.Any] | None
var model_name : str | None
var parameters : dict[str, typing.Any] | None
var provider : str | None

Static methods

def coerce(raw_input: VectorServiceOptions | dict[str, Any] | None) ‑> VectorServiceOptions | None
Expand source code
@staticmethod
def coerce(
    raw_input: VectorServiceOptions | dict[str, Any] | None,
) -> VectorServiceOptions | None:
    if isinstance(raw_input, VectorServiceOptions):
        return raw_input
    else:
        return VectorServiceOptions._from_dict(raw_input)

Methods

def as_dict(self) ‑> dict[str, typing.Any]

Recast this object into a dictionary.

Expand source code
def as_dict(self) -> dict[str, Any]:
    """Recast this object into a dictionary."""

    return {
        k: v
        for k, v in {
            "provider": self.provider,
            "modelName": self.model_name,
            "authentication": self.authentication,
            "parameters": self.parameters,
        }.items()
        if v is not None
    }