Module astrapy.info
Classes
class AlterTableAddColumns (columns: dict[str, TableColumnTypeDescriptor | dict[str, Any]])-
Expand source code
@dataclass class AlterTableAddColumns(AlterTableOperation): """ An object representing the alter-table operation of adding column(s), for use as argument to the table's `alter()` method. Attributes: columns: a mapping between the names of the columns to add and `TableColumnTypeDescriptor` objects, formatted in the same way as the `columns` attribute of `CreateTableDefinition`. """ columns: dict[str, TableColumnTypeDescriptor] def __init__( self, columns: dict[str, TableColumnTypeDescriptor | dict[str, Any]] ) -> None: self._name = "add" self.columns = { col_n: TableColumnTypeDescriptor.coerce(col_v) for col_n, col_v in columns.items() } def __repr__(self) -> str: _col_desc = f"columns=[{','.join(sorted(self.columns.keys()))}]" return f"{self.__class__.__name__}({_col_desc})" def as_dict(self) -> dict[str, Any]: """Recast this object into a dictionary.""" return { "columns": {col_n: col_v.as_dict() for col_n, col_v in self.columns.items()} } @classmethod def _from_dict(cls, raw_dict: dict[str, Any]) -> AlterTableAddColumns: """ Create an instance of AlterTableAddColumns from a dictionary such as one suitable as (partial) command payload. """ _warn_residual_keys(cls, raw_dict, {"columns"}) return AlterTableAddColumns( columns={ col_n: TableColumnTypeDescriptor.coerce(col_v) for col_n, col_v in raw_dict["columns"].items() }, ) @classmethod def coerce( cls, raw_input: AlterTableAddColumns | dict[str, Any] ) -> AlterTableAddColumns: """ Normalize the input, whether an object already or a plain dictionary of the right structure, into an AlterTableAddColumns. """ if isinstance(raw_input, AlterTableAddColumns): return raw_input else: return cls._from_dict(raw_input)An object representing the alter-table operation of adding column(s), for use as argument to the table's
alter() method.
Attributes
columns- a mapping between the names of the columns to add and
TableColumnTypeDescriptor objects, formatted in the same way as the columns attribute of CreateTableDefinition.
Ancestors
- AlterTableOperation
- abc.ABC
Static methods
def coerce(raw_input: AlterTableAddColumns | dict[str, Any]) ‑> AlterTableAddColumns-
Normalize the input, whether an object already or a plain dictionary of the right structure, into an AlterTableAddColumns.
Instance variables
var columns : dict[str, TableColumnTypeDescriptor]-
A mapping between the names of the columns to add and TableColumnTypeDescriptor objects, formatted in the same way as the columns attribute of CreateTableDefinition.
Methods
def as_dict(self) ‑> dict[str, typing.Any]-
Expand source code
def as_dict(self) -> dict[str, Any]: """Recast this object into a dictionary.""" return { "columns": {col_n: col_v.as_dict() for col_n, col_v in self.columns.items()} }Recast this object into a dictionary.
Inherited members
class AlterTableAddVectorize (columns: dict[str, VectorServiceOptions | dict[str, Any]])-
Expand source code
@dataclass class AlterTableAddVectorize(AlterTableOperation): """ An object representing the alter-table operation of enabling the vectorize service (i.e. server-side embedding computation) on one or more columns, for use as argument to the table's `alter()` method. Attributes: columns: a mapping between column names and the corresponding `VectorServiceOptions` objects describing the settings for the desired vectorize service. """ columns: dict[str, VectorServiceOptions] def __init__( self, columns: dict[str, VectorServiceOptions | dict[str, Any]] ) -> None: self._name = "addVectorize" columns_ = { col_n: VectorServiceOptions.coerce(col_v) for col_n, col_v in columns.items() } if any(_col_svc is None for _col_svc in columns_.values()): raise ValueError( "Vector service definition cannot be None for AlterTableAddVectorize" ) self.columns = cast(dict[str, VectorServiceOptions], columns_) def __repr__(self) -> str: _cols_desc = [ f"{col_n}({col_svc.provider}/{col_svc.model_name})" for col_n, col_svc in sorted(self.columns.items()) ] return f"{self.__class__.__name__}(columns={', '.join(_cols_desc)})" def as_dict(self) -> dict[str, Any]: """Recast this object into a dictionary.""" return { "columns": { col_n: col_svc.as_dict() for col_n, col_svc in self.columns.items() } } @classmethod def _from_dict(cls, raw_dict: dict[str, Any]) -> AlterTableAddVectorize: """ Create an instance of AlterTableAddVectorize from a dictionary such as one suitable as (partial) command payload. 
""" _warn_residual_keys(cls, raw_dict, {"columns"}) _columns: dict[str, VectorServiceOptions | None] = { col_n: VectorServiceOptions.coerce(col_v) for col_n, col_v in raw_dict["columns"].items() } if any(_col_svc is None for _col_svc in _columns.values()): raise ValueError( "Vector service definition cannot be None for AlterTableAddVectorize" ) return AlterTableAddVectorize( columns=cast( dict[str, Union[VectorServiceOptions, dict[str, Any]]], _columns, ) ) @classmethod def coerce( cls, raw_input: AlterTableAddVectorize | dict[str, Any] ) -> AlterTableAddVectorize: """ Normalize the input, whether an object already or a plain dictionary of the right structure, into an AlterTableAddVectorize. """ if isinstance(raw_input, AlterTableAddVectorize): return raw_input else: return cls._from_dict(raw_input)An object representing the alter-table operation of enabling the vectorize service (i.e. server-side embedding computation) on one or more columns, for use as argument to the table's
alter() method.
Attributes
columns- a mapping between column names and the corresponding
VectorServiceOptions objects describing the settings for the desired vectorize service.
Ancestors
- AlterTableOperation
- abc.ABC
Static methods
def coerce(raw_input: AlterTableAddVectorize | dict[str, Any]) ‑> AlterTableAddVectorize-
Normalize the input, whether an object already or a plain dictionary of the right structure, into an AlterTableAddVectorize.
Instance variables
var columns : dict[str, VectorServiceOptions]-
A mapping between column names and the corresponding VectorServiceOptions objects describing the settings for the desired vectorize service.
Methods
def as_dict(self) ‑> dict[str, typing.Any]-
Expand source code
def as_dict(self) -> dict[str, Any]: """Recast this object into a dictionary.""" return { "columns": { col_n: col_svc.as_dict() for col_n, col_svc in self.columns.items() } }Recast this object into a dictionary.
Inherited members
class AlterTableDropColumns (columns: list[str] | str)-
Expand source code
@dataclass class AlterTableDropColumns(AlterTableOperation): """ An object representing the alter-table operation of dropping column(s), for use as argument to the table's `alter()` method. Attributes: columns: a list of the column names to drop. Passing a single string has the same effect as passing a single-item list. """ columns: list[str] def __init__(self, columns: list[str] | str) -> None: self._name = "drop" self.columns = [columns] if isinstance(columns, str) else columns def __repr__(self) -> str: _col_desc = f"columns=[{','.join(self.columns)}]" return f"{self.__class__.__name__}({_col_desc})" def as_dict(self) -> dict[str, Any]: """Recast this object into a dictionary.""" return { "columns": self.columns, } @classmethod def _from_dict(cls, raw_dict: dict[str, Any]) -> AlterTableDropColumns: """ Create an instance of AlterTableDropColumns from a dictionary such as one suitable as (partial) command payload. """ _warn_residual_keys(cls, raw_dict, {"columns"}) return AlterTableDropColumns( columns=raw_dict["columns"], ) @classmethod def coerce( cls, raw_input: AlterTableDropColumns | dict[str, Any] ) -> AlterTableDropColumns: """ Normalize the input, whether an object already or a plain dictionary of the right structure, into an AlterTableDropColumns. """ if isinstance(raw_input, AlterTableDropColumns): return raw_input else: return cls._from_dict(raw_input)An object representing the alter-table operation of dropping column(s), for use as argument to the table's
alter() method.
Attributes
columns- a list of the column names to drop. Passing a single string has the same effect as passing a single-item list.
Ancestors
- AlterTableOperation
- abc.ABC
Static methods
def coerce(raw_input: AlterTableDropColumns | dict[str, Any]) ‑> AlterTableDropColumns-
Normalize the input, whether an object already or a plain dictionary of the right structure, into an AlterTableDropColumns.
Instance variables
var columns : list[str]-
A list of the column names to drop. Passing a single string has the same effect as passing a single-item list.
Methods
def as_dict(self) ‑> dict[str, typing.Any]-
Expand source code
def as_dict(self) -> dict[str, Any]: """Recast this object into a dictionary.""" return { "columns": self.columns, }Recast this object into a dictionary.
Inherited members
class AlterTableDropVectorize (columns: list[str] | str)-
Expand source code
@dataclass class AlterTableDropVectorize(AlterTableOperation): """ An object representing the alter-table operation of removing the vectorize service (i.e. the server-side embedding computation) from one or more columns, for use as argument to the table's `alter()` method. Note: this operation does not drop the column, simply unsets its vectorize service. Existing embedding vectors, stored in the table, are retained. Attributes: columns: a list of the column names whose vectorize service is to be removed. Passing a single string has the same effect as passing a single-item list. """ columns: list[str] def __init__(self, columns: list[str] | str) -> None: self._name = "dropVectorize" self.columns = [columns] if isinstance(columns, str) else columns def __repr__(self) -> str: _col_desc = f"columns=[{','.join(self.columns)}]" return f"{self.__class__.__name__}({_col_desc})" def as_dict(self) -> dict[str, Any]: """Recast this object into a dictionary.""" return { "columns": self.columns, } @classmethod def _from_dict(cls, raw_dict: dict[str, Any]) -> AlterTableDropVectorize: """ Create an instance of AlterTableDropVectorize from a dictionary such as one suitable as (partial) command payload. """ _warn_residual_keys(cls, raw_dict, {"columns"}) return AlterTableDropVectorize( columns=raw_dict["columns"], ) @classmethod def coerce( cls, raw_input: AlterTableDropVectorize | dict[str, Any] ) -> AlterTableDropVectorize: """ Normalize the input, whether an object already or a plain dictionary of the right structure, into an AlterTableDropVectorize. """ if isinstance(raw_input, AlterTableDropVectorize): return raw_input else: return cls._from_dict(raw_input)An object representing the alter-table operation of removing the vectorize service (i.e. the server-side embedding computation) from one or more columns, for use as argument to the table's
alter() method.
Note: this operation does not drop the column, simply unsets its vectorize service. Existing embedding vectors, stored in the table, are retained.
Attributes
columns- a list of the column names whose vectorize service is to be removed. Passing a single string has the same effect as passing a single-item list.
Ancestors
- AlterTableOperation
- abc.ABC
Static methods
def coerce(raw_input: AlterTableDropVectorize | dict[str, Any]) ‑> AlterTableDropVectorize-
Normalize the input, whether an object already or a plain dictionary of the right structure, into an AlterTableDropVectorize.
Instance variables
var columns : list[str]-
A list of the column names whose vectorize service is to be removed. Passing a single string has the same effect as passing a single-item list.
Methods
def as_dict(self) ‑> dict[str, typing.Any]-
Expand source code
def as_dict(self) -> dict[str, Any]: """Recast this object into a dictionary.""" return { "columns": self.columns, }Recast this object into a dictionary.
Inherited members
class AlterTypeAddFields (*, fields: dict[str, TableColumnTypeDescriptor | dict[str, Any] | str])-
Expand source code
@dataclass class AlterTypeAddFields(AlterTypeOperation): """ An object representing the alter-type operation of adding field(s), for use in the argument to the database's `alter_type()` method. Attributes: fields: a mapping between the names of the fields to add and `TableColumnTypeDescriptor` objects, formatted in the same way as the `columns` attribute of `CreateTableDefinition`. """ fields: dict[str, TableColumnTypeDescriptor] def __init__( self, *, fields: dict[str, TableColumnTypeDescriptor | dict[str, Any] | str], ) -> None: self._name = "add" self.fields = { fld_n: TableColumnTypeDescriptor.coerce(fld_v) for fld_n, fld_v in fields.items() } def __repr__(self) -> str: _fld_desc = f"fields=[{','.join(sorted(self.fields.keys()))}]" return f"{self.__class__.__name__}({_fld_desc})" def as_dict(self) -> dict[str, Any]: """Recast this object into a dictionary.""" return { "fields": {fld_n: fld_v.as_dict() for fld_n, fld_v in self.fields.items()} } @classmethod def _from_dict(cls, raw_dict: dict[str, Any]) -> AlterTypeAddFields: """ Create an instance of AlterTypeAddFields from a dictionary such as one suitable as (partial) command payload. """ _warn_residual_keys(cls, raw_dict, {"fields"}) return AlterTypeAddFields(fields=raw_dict["fields"]) @classmethod def coerce( cls, raw_input: AlterTypeAddFields | dict[str, Any] ) -> AlterTypeAddFields: """ Normalize the input, whether an object already or a plain dictionary of the right structure, into an AlterTypeAddFields. """ if isinstance(raw_input, AlterTypeAddFields): return raw_input else: return cls._from_dict(raw_input) @classmethod def stack(cls, operations: list[AlterTypeAddFields]) -> AlterTypeAddFields: fields_dict: dict[str, TableColumnTypeDescriptor | dict[str, Any] | str] = {} for ataf in operations: fields_dict = {**fields_dict, **ataf.fields} return AlterTypeAddFields(fields=fields_dict)An object representing the alter-type operation of adding field(s), for use in the argument to the database's
alter_type() method.
Attributes
fields- a mapping between the names of the fields to add and
TableColumnTypeDescriptor objects, formatted in the same way as the columns attribute of CreateTableDefinition.
Ancestors
- AlterTypeOperation
- abc.ABC
Static methods
def coerce(raw_input: AlterTypeAddFields | dict[str, Any]) ‑> AlterTypeAddFields-
Normalize the input, whether an object already or a plain dictionary of the right structure, into an AlterTypeAddFields.
def stack(operations: list[AlterTypeAddFields]) ‑> AlterTypeAddFields
Instance variables
var fields : dict[str, TableColumnTypeDescriptor]-
A mapping between the names of the fields to add and TableColumnTypeDescriptor objects, formatted in the same way as the columns attribute of CreateTableDefinition.
Methods
def as_dict(self) ‑> dict[str, typing.Any]-
Expand source code
def as_dict(self) -> dict[str, Any]: """Recast this object into a dictionary.""" return { "fields": {fld_n: fld_v.as_dict() for fld_n, fld_v in self.fields.items()} }Recast this object into a dictionary.
Inherited members
class AlterTypeOperation (_name: str)-
Expand source code
@dataclass class AlterTypeOperation(ABC): """ An abstract class representing a generic "alter type" operation, i.e. a change to be applied to a user-defined type (UDT) stored on the database. Concrete implementations are used to represent operations such as adding/renaming fields. `AlterTypeOperation` objects are used in the Database's `alter_type` method. Please consult the documentation of the concrete subclasses for more info. """ _name: str @abstractmethod def as_dict(self) -> dict[str, Any]: ... @staticmethod def from_full_dict(operation_dict: dict[str, Any]) -> AlterTypeOperation: """ Inspect a provided dictionary and make it into the correct concrete subclass of AlterTypeOperation depending on its contents. Note: while the nature of the operation must be the top-level single key of the (nested) dictionary parameter to this method (such as "add" or "rename"), the resulting `AlterTypeOperation` object encodes the content of the corresponding value. Likewise, calling the `as_dict()` method of the result from this method does not return the whole original input, rather the "one level in" part (see the example provided here). Args: operation_dict: a dictionary such as `{"add": ...}`, whose outermost *value* corresponds to the desired operation. Returns: an `AlterTypeOperation` object chosen after inspection of the provided input. Example: >>> full_dict = {"add": {"fields": { ... "fld1": "text", "fld2": {"type": "int"} ... 
}}} >>> alter_op = AlterTypeOperation.from_full_dict(full_dict) >>> alter_op AlterTypeAddFields(fields=[fld1,fld2]) >>> alter_op.as_dict() {'fields': {'fld1': {'type': 'text'}, 'fld2': {'type': 'int'}}} """ key_set = set(operation_dict.keys()) if key_set == {"add"}: return AlterTypeAddFields.coerce(operation_dict["add"]) elif key_set == {"rename"}: return AlterTypeRenameFields.coerce(operation_dict["rename"]) else: raise ValueError( f"Cannot parse a dict with keys {', '.join(sorted(key_set))} " "into an AlterTypeOperation" ) @classmethod @abstractmethod def coerce(cls: type[AYO], raw_input: AYO | dict[str, Any]) -> AYO: ... @classmethod @abstractmethod def stack(cls: type[AYO], operations: list[AYO]) -> AYO: ... @classmethod def stack_by_name( cls, operations: list[AlterTypeOperation] ) -> dict[str, AlterTypeOperation]: grouped_ops: dict[str, list[AlterTypeOperation]] = {} for op in operations: grouped_ops[op._name] = grouped_ops.get(op._name, []) + [op] stacked_op_map: dict[str, AlterTypeOperation] = { op_name: op_list[0].stack(op_list) for op_name, op_list in grouped_ops.items() } return stacked_op_mapAn abstract class representing a generic "alter type" operation, i.e. a change to be applied to a user-defined type (UDT) stored on the database. Concrete implementations are used to represent operations such as adding/renaming fields.
AlterTypeOperation objects are used in the Database's alter_type method.
Please consult the documentation of the concrete subclasses for more info.
Ancestors
- abc.ABC
Subclasses
Static methods
def coerce(raw_input: AYO | dict[str, Any]) ‑> ~AYO
def from_full_dict(operation_dict: dict[str, Any]) ‑> AlterTypeOperation-
Expand source code
@staticmethod def from_full_dict(operation_dict: dict[str, Any]) -> AlterTypeOperation: """ Inspect a provided dictionary and make it into the correct concrete subclass of AlterTypeOperation depending on its contents. Note: while the nature of the operation must be the top-level single key of the (nested) dictionary parameter to this method (such as "add" or "rename"), the resulting `AlterTypeOperation` object encodes the content of the corresponding value. Likewise, calling the `as_dict()` method of the result from this method does not return the whole original input, rather the "one level in" part (see the example provided here). Args: operation_dict: a dictionary such as `{"add": ...}`, whose outermost *value* corresponds to the desired operation. Returns: an `AlterTypeOperation` object chosen after inspection of the provided input. Example: >>> full_dict = {"add": {"fields": { ... "fld1": "text", "fld2": {"type": "int"} ... }}} >>> alter_op = AlterTypeOperation.from_full_dict(full_dict) >>> alter_op AlterTypeAddFields(fields=[fld1,fld2]) >>> alter_op.as_dict() {'fields': {'fld1': {'type': 'text'}, 'fld2': {'type': 'int'}}} """ key_set = set(operation_dict.keys()) if key_set == {"add"}: return AlterTypeAddFields.coerce(operation_dict["add"]) elif key_set == {"rename"}: return AlterTypeRenameFields.coerce(operation_dict["rename"]) else: raise ValueError( f"Cannot parse a dict with keys {', '.join(sorted(key_set))} " "into an AlterTypeOperation" )Inspect a provided dictionary and make it into the correct concrete subclass of AlterTypeOperation depending on its contents.
Note: while the nature of the operation must be the top-level single key of the (nested) dictionary parameter to this method (such as "add" or "rename"), the resulting
AlterTypeOperation object encodes the content of the corresponding value. Likewise, calling the as_dict() method of the result from this method does not return the whole original input, rather the "one level in" part (see the example provided here).
Args
operation_dict- a dictionary such as
{"add": ...}, whose outermost value
corresponds to the desired operation.
Returns
an
AlterTypeOperation object chosen after inspection of the provided input.
Example
>>> full_dict = {"add": {"fields": { ... "fld1": "text", "fld2": {"type": "int"} ... }}} >>> alter_op = AlterTypeOperation.from_full_dict(full_dict) >>> alter_op AlterTypeAddFields(fields=[fld1,fld2]) >>> alter_op.as_dict() {'fields': {'fld1': {'type': 'text'}, 'fld2': {'type': 'int'}}}
def stack(operations: list[AYO]) ‑> ~AYO
def stack_by_name(operations: list[AlterTypeOperation]) ‑> dict[str, AlterTypeOperation]
Methods
def as_dict(self) ‑> dict[str, typing.Any]-
Expand source code
@abstractmethod def as_dict(self) -> dict[str, Any]: ...
class AlterTypeRenameFields (*, fields: dict[str, str])-
Expand source code
@dataclass class AlterTypeRenameFields(AlterTypeOperation): """ An object representing the alter-type operation of renaming field(s), for use in the argument to the database's `alter_type()` method. Attributes: fields: a mapping from current to new names for the fields to rename. """ fields: dict[str, str] def __init__(self, *, fields: dict[str, str]) -> None: self._name = "rename" self.fields = fields def __repr__(self) -> str: _renames = "; ".join( [f"'{old_n}' -> '{new_n}'" for old_n, new_n in sorted(self.fields.items())] ) return f"{self.__class__.__name__}(fields=[{_renames}])" def as_dict(self) -> dict[str, Any]: """Recast this object into a dictionary.""" return { "fields": self.fields, } @classmethod def _from_dict(cls, raw_dict: dict[str, Any]) -> AlterTypeRenameFields: """ Create an instance of AlterTypeRenameFields from a dictionary such as one suitable as (partial) command payload. """ _warn_residual_keys(cls, raw_dict, {"fields"}) return AlterTypeRenameFields( fields={old_n: new_n for old_n, new_n in raw_dict["fields"].items()}, ) @classmethod def coerce( cls, raw_input: AlterTypeRenameFields | dict[str, Any] ) -> AlterTypeRenameFields: """ Normalize the input, whether an object already or a plain dictionary of the right structure, into an AlterTypeRenameFields. """ if isinstance(raw_input, AlterTypeRenameFields): return raw_input else: return cls._from_dict(raw_input) @classmethod def stack(cls, operations: list[AlterTypeRenameFields]) -> AlterTypeRenameFields: fields_dict: dict[str, str] = {} for ataf in operations: fields_dict = {**fields_dict, **ataf.fields} return AlterTypeRenameFields(fields=fields_dict)An object representing the alter-type operation of renaming field(s), for use in the argument to the database's
alter_type() method.
Attributes
fields- a mapping from current to new names for the fields to rename.
Ancestors
- AlterTypeOperation
- abc.ABC
Static methods
def coerce(raw_input: AlterTypeRenameFields | dict[str, Any]) ‑> AlterTypeRenameFields-
Normalize the input, whether an object already or a plain dictionary of the right structure, into an AlterTypeRenameFields.
def stack(operations: list[AlterTypeRenameFields]) ‑> AlterTypeRenameFields
Instance variables
var fields : dict[str, str]-
A mapping from current to new names for the fields to rename.
Methods
def as_dict(self) ‑> dict[str, typing.Any]-
Expand source code
def as_dict(self) -> dict[str, Any]: """Recast this object into a dictionary.""" return { "fields": self.fields, }Recast this object into a dictionary.
Inherited members
class AstraDBAdminDatabaseInfo (*, environment: str, raw_dict: dict[str, Any])-
Expand source code
@dataclass class AstraDBAdminDatabaseInfo(_BaseAstraDBDatabaseInfo): """ A class representing the information of an Astra DB database, including region details. This is the type of the response from the AstraDBDatabaseAdmin `info` method. Note: This class, if applicable, describes a multi-region database in all its regions, as opposed to the `AstraDBDatabaseInfo`. Attributes: id: the Database ID, in the form of a UUID string with dashes. Example: "01234567-89ab-cdef-0123-456789abcdef". name: the name of the database as set by the user at creation time. The database name is not necessarily unique across databases in an org. keyspaces: A list of the keyspaces available in the database. status: A string describing the current status of the database. Example values are: "ACTIVE", "MAINTENANCE", "INITIALIZING", and others (see the DevOps API documentation for more on database statuses). environment: a string identifying the environment for the database. In the typical usage, this equals "prod". cloud_provider: a string describing the cloud provider hosting the database. raw: a dictionary containing the full response from the DevOps API call to obtain the database information. created_at: information about when the database has been created. last_used: information about when the database was accessed last. org_id: the ID of the Astra organization the database belongs to, in the form of a UUID string with dashes. owner_id: the ID of the Astra account owning the database, in the form of a UUID string with dashes. regions: a list of `AstraDBAdminDatabaseRegionInfo` objects, one for each of the regions the database is replicated to. Note: The `raw_info` dictionary usually has a `region` key describing the default region as configured in the database, which does not necessarily (for multi-region databases) match the region through which the connection is established: the latter is the one specified by the "api endpoint" used for connecting. 
In other words, for multi-region databases it is possible that `database_info.region != database_info.raw_info["region"]`. Conversely, in case of a AstraDBDatabaseInfo not obtained through a connected database, such as when calling `Admin.list_databases()`, all fields except `environment` (e.g. keyspace, region, etc) are set as found on the DevOps API response directly. """ created_at: datetime.datetime | None last_used: datetime.datetime | None org_id: str owner_id: str regions: list[AstraDBAdminDatabaseRegionInfo] def __init__( self, *, environment: str, raw_dict: dict[str, Any], ) -> None: self.created_at = _failsafe_parse_date(raw_dict.get("creationTime")) self.last_used = _failsafe_parse_date(raw_dict.get("lastUsageTime")) self.org_id = raw_dict["orgId"] self.owner_id = raw_dict["ownerId"] _BaseAstraDBDatabaseInfo.__init__( self=self, environment=environment, raw_dict=raw_dict, ) self.regions = [ AstraDBAdminDatabaseRegionInfo( raw_datacenter_dict=raw_datacenter_dict, environment=environment, database_id=self.id, ) for raw_datacenter_dict in raw_dict["info"]["datacenters"] ] def __repr__(self) -> str: pieces = [ _BaseAstraDBDatabaseInfo._inner_desc(self), f"created_at={self.created_at}", f"last_used={self.last_used}", f"org_id={self.org_id}", f"owner_id={self.owner_id}", f"regions={self.regions}", "raw=...", ] return f"{self.__class__.__name__}({', '.join(pieces)})"A class representing the information of an Astra DB database, including region details. This is the type of the response from the AstraDBDatabaseAdmin
info method.
Note
This class, if applicable, describes a multi-region database in all its regions, as opposed to the
AstraDBDatabaseInfo.
Attributes
id- the Database ID, in the form of a UUID string with dashes. Example: "01234567-89ab-cdef-0123-456789abcdef".
name- the name of the database as set by the user at creation time. The database name is not necessarily unique across databases in an org.
keyspaces- A list of the keyspaces available in the database.
status- A string describing the current status of the database. Example values are: "ACTIVE", "MAINTENANCE", "INITIALIZING", and others (see the DevOps API documentation for more on database statuses).
environment- a string identifying the environment for the database. In the typical usage, this equals "prod".
cloud_provider- a string describing the cloud provider hosting the database.
raw- a dictionary containing the full response from the DevOps API call to obtain the database information.
created_at- information about when the database has been created.
last_used- information about when the database was accessed last.
org_id- the ID of the Astra organization the database belongs to, in the form of a UUID string with dashes.
owner_id- the ID of the Astra account owning the database, in the form of a UUID string with dashes.
regions- a list of
AstraDBAdminDatabaseRegionInfoobjects, one for each of the regions the database is replicated to.
Note
The
raw_info dictionary usually has a region key describing the default region as configured in the database, which does not necessarily (for multi-region databases) match the region through which the connection is established: the latter is the one specified by the "api endpoint" used for connecting. In other words, for multi-region databases it is possible that database_info.region != database_info.raw_info["region"]. Conversely, in case of an AstraDBDatabaseInfo not obtained through a connected database, such as when calling Admin.list_databases(), all fields except environment (e.g. keyspace, region, etc) are set as found on the DevOps API response directly.
Ancestors
- astrapy.data.info.database_info._BaseAstraDBDatabaseInfo
Instance variables
var created_at : datetime.datetime | None-
Information about when the database has been created.
var last_used : datetime.datetime | None-
Information about when the database was accessed last.
var org_id : str-
The ID of the Astra organization the database belongs to, in the form of a UUID string with dashes.
var owner_id : str-
The ID of the Astra account owning the database, in the form of a UUID string with dashes.
var regions : list[AstraDBAdminDatabaseRegionInfo]-
A list of AstraDBAdminDatabaseRegionInfo objects, one for each of the regions the database is replicated to.
class AstraDBAvailableRegionInfo (classification: str,
cloud_provider: str,
display_name: str,
enabled: bool,
name: str,
reserved_for_qualified_users: bool,
zone: str)-
Expand source code
@dataclass class AstraDBAvailableRegionInfo: """ Represents a region information as returned by the `find_available_regions` method: in other words, it is a descriptor of a certain region available for database creation. Attributes: classification: level of access to the region, one of 'standard', 'premium' or 'premium_plus'. cloud_provider: one of 'gcp', 'aws' or 'azure'. display_name: a region "pretty name" e.g. for printing messages. enabled: a boolean flag marking whether the region is enabled. name: the short, ID-like name of the region. This can be used as an identifier since it determines a region uniquely. reserved_for_qualified_users: a boolean flag marking availability settings. zone: macro-zone for the region, e.g. "na" or "emea". """ classification: str cloud_provider: str display_name: str enabled: bool name: str reserved_for_qualified_users: bool zone: str def __repr__(self) -> str: body = f'{self.cloud_provider}/{self.name}: "{self.display_name}", ...' return f"{self.__class__.__name__}({body})" def as_dict(self) -> dict[str, Any]: """ Recast this object into a dictionary. """ return { "classification": self.classification, "cloudProvider": self.cloud_provider, "displayName": self.display_name, "enabled": self.enabled, "name": self.name, "region_type": "vector", "reservedForQualifiedUsers": self.reserved_for_qualified_users, "zone": self.zone, } @property @deprecated_property( new_name="name", deprecated_in="2.0.1", removed_in="2.3.0", ) def region_name(self) -> str: return self.name @classmethod def _from_dict(cls, raw_dict: dict[str, Any]) -> AstraDBAvailableRegionInfo: """ Create an instance of AstraDBAvailableRegionInfo from a dictionary such as one from the Data API. 
""" _warn_residual_keys( cls, raw_dict, { "classification", "cloudProvider", "displayName", "enabled", "name", "region_type", "reservedForQualifiedUsers", "zone", # The following intentionally suppresses a warning (until full PCU support) "pcu_types", }, ) return AstraDBAvailableRegionInfo( classification=raw_dict["classification"], cloud_provider=raw_dict["cloudProvider"], display_name=raw_dict["displayName"], enabled=raw_dict["enabled"], name=raw_dict["name"], reserved_for_qualified_users=raw_dict["reservedForQualifiedUsers"], zone=raw_dict["zone"], )Represents a region information as returned by the
find_available_regions method: in other words, it is a descriptor of a certain region available for database creation.
Attributes
classification- level of access to the region, one of 'standard', 'premium' or 'premium_plus'.
cloud_provider- one of 'gcp', 'aws' or 'azure'.
display_name- a region "pretty name" e.g. for printing messages.
enabled- a boolean flag marking whether the region is enabled.
name- the short, ID-like name of the region. This can be used as an identifier since it determines a region uniquely.
reserved_for_qualified_users- a boolean flag marking availability settings.
zone- macro-zone for the region, e.g. "na" or "emea".
Instance variables
var classification : str-
Level of access to the region: one of 'standard', 'premium' or 'premium_plus'.
var cloud_provider : str-
The cloud provider hosting the region: one of 'gcp', 'aws' or 'azure'.
var display_name : str-
A region "pretty name", e.g. for printing messages.
var enabled : bool-
A boolean flag marking whether the region is enabled.
var name : str-
The short, ID-like name of the region; determines a region uniquely.
prop region_name : str-
Expand source code
@property @deprecated_property( new_name="name", deprecated_in="2.0.1", removed_in="2.3.0", ) def region_name(self) -> str: return self.name var reserved_for_qualified_users : bool-
A boolean flag marking whether the region is reserved for qualified users.
var zone : str-
Macro-zone for the region, e.g. "na" or "emea".
Methods
def as_dict(self) ‑> dict[str, typing.Any]-
Expand source code
def as_dict(self) -> dict[str, Any]: """ Recast this object into a dictionary. """ return { "classification": self.classification, "cloudProvider": self.cloud_provider, "displayName": self.display_name, "enabled": self.enabled, "name": self.name, "region_type": "vector", "reservedForQualifiedUsers": self.reserved_for_qualified_users, "zone": self.zone, }Recast this object into a dictionary.
class AstraDBDatabaseInfo (*, environment: str, api_endpoint: str, raw_dict: dict[str, Any])-
Expand source code
@dataclass
class AstraDBDatabaseInfo(_BaseAstraDBDatabaseInfo):
    """
    A class representing the information of an Astra DB database, including
    region details. This is the type of the response from the Database
    `info` method.

    Note: a database can in general be replicated across multiple regions,
    in an active/active manner. Yet, when connecting to it, one always
    explicitly specifies a certain region: in other words, the connection
    (as represented by the `Database` class and analogous) is always done
    to a specific region. In this sense, this class represents the notion
    of "a database reached from a certain region". See class
    `AstraDBAdminDatabaseInfo` for (possibly) multi-region database
    information.

    Attributes:
        id: the Database ID, in the form of a UUID string with dashes.
            Example: "01234567-89ab-cdef-0123-456789abcdef".
        name: the name of the database as set by the user at creation time.
            The database name is not necessarily unique across databases
            in an org.
        keyspaces: A list of the keyspaces available in the database.
        status: A string describing the current status of the database.
            Example values are: "ACTIVE", "MAINTENANCE", "INITIALIZING",
            and others (see the DevOps API documentation for more on
            database statuses).
        environment: a string identifying the environment for the database.
            In the typical usage, this equals "prod".
        cloud_provider: a string describing the cloud provider hosting the
            database.
        raw: a dictionary containing the full response from the DevOps API
            call to obtain the database information.
        region: the region this database is accessed through.
        api_endpoint: the API Endpoint used to connect to this database in
            this region.

    Note:
        The `raw_info` dictionary usually has a `region` key describing the
        default region as configured in the database, which does not
        necessarily (for multi-region databases) match the region through
        which the connection is established: the latter is the one
        specified by the "api endpoint" used for connecting. In other
        words, for multi-region databases it is possible that
        `database_info.region != database_info.raw_info["region"]`.
        Conversely, in case of an AstraDBDatabaseInfo not obtained through
        a connected database, such as when calling `Admin.list_databases()`,
        all fields except `environment` (e.g. keyspace, region, etc) are
        set as found on the DevOps API response directly.
    """

    region: str
    api_endpoint: str

    def __init__(
        self,
        *,
        environment: str,
        api_endpoint: str,
        raw_dict: dict[str, Any],
    ) -> None:
        self.api_endpoint = api_endpoint
        # The region is derived from the endpoint: the connection is always
        # region-specific (see class docstring). An unparsable endpoint
        # results in an empty-string region.
        parsed = parse_api_endpoint(self.api_endpoint)
        if parsed is None:
            self.region = ""
        else:
            self.region = parsed.region
        _BaseAstraDBDatabaseInfo.__init__(
            self=self,
            environment=environment,
            raw_dict=raw_dict,
        )

    def __repr__(self) -> str:
        parts = [
            _BaseAstraDBDatabaseInfo._inner_desc(self),
            f"region={self.region}",
            f"api_endpoint={self.api_endpoint}",
            "raw=...",
        ]
        return f"{self.__class__.__name__}({', '.join(parts)})"
`info` method.
Note
a database can in general be replicated across multiple regions, in an active/active manner. Yet, when connecting to it, one always explicitly specifies a certain region: in other words, the connection (as represented by the
`Database` class and analogous) is always done to a specific region. In this sense, this class represents the notion of "a database reached from a certain region". See class `AstraDBAdminDatabaseInfo` for (possibly) multi-region database information.
Attributes
id- the Database ID, in the form of a UUID string with dashes. Example: "01234567-89ab-cdef-0123-456789abcdef".
name- the name of the database as set by the user at creation time. The database name is not necessarily unique across databases in an org.
keyspaces- A list of the keyspaces available in the database.
status- A string describing the current status of the database. Example values are: "ACTIVE", "MAINTENANCE", "INITIALIZING", and others (see the DevOps API documentation for more on database statuses).
environment- a string identifying the environment for the database. In the typical usage, this equals "prod".
cloud_provider- a string describing the cloud provider hosting the database.
raw- a dictionary containing the full response from the DevOps API call to obtain the database information.
region- the region this database is accessed through.
api_endpoint- the API Endpoint used to connect to this database in this region.
Note
The
The `raw_info` dictionary usually has a `region` key describing the default region as configured in the database, which does not necessarily (for multi-region databases) match the region through which the connection is established: the latter is the one specified by the "api endpoint" used for connecting. In other words, for multi-region databases it is possible that `database_info.region != database_info.raw_info["region"]`. Conversely, in case of an AstraDBDatabaseInfo not obtained through a connected database, such as when calling `Admin.list_databases()`, all fields except `environment` (e.g. keyspace, region, etc) are set as found on the DevOps API response directly.
Ancestors
- astrapy.data.info.database_info._BaseAstraDBDatabaseInfo
Instance variables
var api_endpoint : str-
The API Endpoint used to connect to this database in this region.
var region : str-
The region this database is accessed through.
class CollectionDefaultIDOptions (default_id_type: str)-
Expand source code
@dataclass class CollectionDefaultIDOptions: """ The "defaultId" component of the collection options. See the Data API specifications for allowed values. Attributes: default_id_type: this setting determines what type of IDs the Data API will generate when inserting documents that do not specify their `_id` field explicitly. Can be set to any of the values `DefaultIdType.UUID`, `DefaultIdType.OBJECTID`, `DefaultIdType.UUIDV6`, `DefaultIdType.UUIDV7`, `DefaultIdType.DEFAULT`. """ default_id_type: str def as_dict(self) -> dict[str, Any]: """Recast this object into a dictionary.""" return {"type": self.default_id_type} @staticmethod def _from_dict( raw_dict: dict[str, Any] | None, ) -> CollectionDefaultIDOptions | None: """ Create an instance of CollectionDefaultIDOptions from a dictionary such as one from the Data API. """ if raw_dict is not None: return CollectionDefaultIDOptions(default_id_type=raw_dict["type"]) else: return NoneThe "defaultId" component of the collection options. See the Data API specifications for allowed values.
Attributes
default_id_type- this setting determines what type of IDs the Data API will
generate when inserting documents that do not specify their
`_id` field explicitly. Can be set to any of the values `DefaultIdType.UUID`, `DefaultIdType.OBJECTID`, `DefaultIdType.UUIDV6`, `DefaultIdType.UUIDV7`, `DefaultIdType.DEFAULT`.
Instance variables
var default_id_type : str-
The default ID type the Data API will use for documents inserted without an explicit `_id`.
Methods
def as_dict(self) ‑> dict[str, typing.Any]-
Expand source code
def as_dict(self) -> dict[str, Any]: """Recast this object into a dictionary.""" return {"type": self.default_id_type}Recast this object into a dictionary.
class CollectionDefinition (vector: CollectionVectorOptions | None = None,
lexical: CollectionLexicalOptions | None = None,
rerank: CollectionRerankOptions | None = None,
indexing: dict[str, Any] | None = None,
default_id: CollectionDefaultIDOptions | None = None)-
Expand source code
@dataclass
class CollectionDefinition:
    """
    A structure expressing the options of a collection.
    See the Data API specifications for detailed specification and allowed values.

    Instances of this object can be created in three ways: using a fluent
    interface, passing a fully-formed definition to the class constructor,
    or coercing an appropriately-shaped plain dictionary into this class.
    See the examples below and the Table documentation for more details.

    Attributes:
        vector: an optional CollectionVectorOptions object.
        lexical: A `CollectionLexicalOptions` object encoding the desired "lexical"
            settings. If omitted, the Data API defaults apply.
        rerank: A `CollectionRerankOptions` object encoding the desired "rerank"
            settings. If omitted, the Data API defaults apply.
        indexing: an optional dictionary with the "indexing" collection properties.
            This is in the form of a dictionary such as `{"deny": [...]}`
            or `{"allow": [...]}`, with a list of document paths, or
            alternatively just `["*"]`, to exclude from/include in
            collection indexing, respectively.
        default_id: an optional CollectionDefaultIDOptions object (see).

    Example:
        >>> from astrapy.constants import VectorMetric
        >>> from astrapy.info import CollectionDefinition, CollectionVectorOptions
        >>>
        >>> # Create a collection definition with the fluent interface:
        >>> collection_definition = (
        ...     CollectionDefinition.builder()
        ...     .set_vector_dimension(3)
        ...     .set_vector_metric(VectorMetric.DOT_PRODUCT)
        ...     .set_indexing("deny", ["annotations", "logs"])
        ...     .build()
        ... )
        >>>
        >>> # Create a collection definition passing everything to the constructor:
        >>> collection_definition_1 = CollectionDefinition(
        ...     vector=CollectionVectorOptions(
        ...         dimension=3,
        ...         metric=VectorMetric.DOT_PRODUCT,
        ...     ),
        ...     indexing={"deny": ["annotations", "logs"]},
        ... )
        >>>
        >>> # Coerce a dictionary into a collection definition:
        >>> collection_definition_2_dict = {
        ...     "indexing": {"deny": ["annotations", "logs"]},
        ...     "vector": {
        ...         "dimension": 3,
        ...         "metric": VectorMetric.DOT_PRODUCT,
        ...     },
        ... }
        >>> collection_definition_2 = CollectionDefinition.coerce(
        ...     collection_definition_2_dict
        ... )
        >>>
        >>> # The three created objects are exactly identical:
        >>> collection_definition_2 == collection_definition_1
        True
        >>> collection_definition_2 == collection_definition
        True
    """

    vector: CollectionVectorOptions | None = None
    lexical: CollectionLexicalOptions | None = None
    rerank: CollectionRerankOptions | None = None
    indexing: dict[str, Any] | None = None
    default_id: CollectionDefaultIDOptions | None = None

    def __repr__(self) -> str:
        # Only the non-None components appear in the repr.
        not_null_pieces = [
            pc
            for pc in [
                None if self.vector is None else f"vector={self.vector.__repr__()}",
                None if self.lexical is None else f"lexical={self.lexical.__repr__()}",
                None if self.rerank is None else f"rerank={self.rerank.__repr__()}",
                (
                    None
                    if self.indexing is None
                    else f"indexing={self.indexing.__repr__()}"
                ),
                (
                    None
                    if self.default_id is None
                    else f"default_id={self.default_id.__repr__()}"
                ),
            ]
            if pc is not None
        ]
        return f"{self.__class__.__name__}({', '.join(not_null_pieces)})"

    def as_dict(self) -> dict[str, Any]:
        """Recast this object into a dictionary."""
        # None and empty-dict entries are dropped from the resulting payload.
        return {
            k: v
            for k, v in {
                "vector": None if self.vector is None else self.vector.as_dict(),
                "lexical": None if self.lexical is None else self.lexical.as_dict(),
                "rerank": None if self.rerank is None else self.rerank.as_dict(),
                "indexing": self.indexing,
                "defaultId": (
                    None if self.default_id is None else self.default_id.as_dict()
                ),
            }.items()
            if v is not None
            if v != {}
        }

    @classmethod
    def _from_dict(cls, raw_dict: dict[str, Any]) -> CollectionDefinition:
        """
        Create an instance of CollectionDefinition from a dictionary
        such as one from the Data API.
        """
        _warn_residual_keys(
            cls, raw_dict, {"vector", "lexical", "rerank", "indexing", "defaultId"}
        )
        return CollectionDefinition(
            vector=CollectionVectorOptions._from_dict(raw_dict.get("vector")),
            lexical=CollectionLexicalOptions._from_dict(raw_dict.get("lexical")),
            rerank=CollectionRerankOptions._from_dict(raw_dict.get("rerank")),
            indexing=raw_dict.get("indexing"),
            default_id=CollectionDefaultIDOptions._from_dict(raw_dict.get("defaultId")),
        )

    @classmethod
    def coerce(
        cls, raw_input: CollectionDefinition | dict[str, Any]
    ) -> CollectionDefinition:
        """
        Normalize the input, whether an object already or a plain dictionary
        of the right structure, into a CollectionDefinition.
        """
        if isinstance(raw_input, CollectionDefinition):
            return raw_input
        else:
            return cls._from_dict(raw_input)

    @staticmethod
    def builder() -> CollectionDefinition:
        """
        Create an "empty" builder for constructing a collection definition
        through a fluent interface. The resulting object has no defined
        properties, traits that can be added progressively with the
        corresponding methods.

        See the class docstring for a full example on using the fluent interface.

        Returns:
            a CollectionDefinition for the simplest possible creatable collection.
        """
        return CollectionDefinition()

    def set_indexing(
        self, indexing_mode: str | None, indexing_target: list[str] | None = None
    ) -> CollectionDefinition:
        """
        Return a new collection definition object with a new indexing setting.
        The indexing can be set to something (fully overwriting any pre-existing
        configuration), or removed entirely.

        This method is for use within the fluent interface for progressively
        building a complete collection definition.

        See the class docstring for a full example on using the fluent interface.

        Args:
            indexing_mode: one of "allow" or "deny" to configure indexing,
                or None in case one wants to remove the setting.
            indexing_target: a list of the document paths covered by the
                allow/deny prescription.
                Passing this parameter when `indexing_mode` is None results
                in an error.

        Returns:
            a CollectionDefinition obtained by adding (or replacing) the
            desired indexing setting to this collection definition.
        """
        if indexing_mode is None:
            if indexing_target is not None:
                raise ValueError("Cannot pass an indexing target if unsetting indexing")
            return CollectionDefinition(
                vector=self.vector,
                lexical=self.lexical,
                rerank=self.rerank,
                indexing=None,
                default_id=self.default_id,
            )
        _i_mode = indexing_mode.lower()
        if _i_mode not in INDEXING_ALLOWED_MODES:
            msg = (
                f"Unknown indexing mode: '{indexing_mode}'. "
                f"Allowed values are: {', '.join(INDEXING_ALLOWED_MODES)}."
            )
            raise ValueError(msg)
        _i_target: list[str] = indexing_target or []
        return CollectionDefinition(
            vector=self.vector,
            lexical=self.lexical,
            rerank=self.rerank,
            # Fix: use the normalized mode and the None-defaulted target.
            # Previously the raw arguments were used, so `_i_target` was dead
            # code and passing indexing_target=None leaked `{mode: None}` into
            # the definition instead of `{mode: []}`.
            indexing={_i_mode: _i_target},
            default_id=self.default_id,
        )

    def set_default_id(self, default_id_type: str | None) -> CollectionDefinition:
        """
        Return a new collection definition object with a new setting
        for the collection 'default ID type'.

        This method is for use within the fluent interface for progressively
        building a complete collection definition.

        See the class docstring for a full example on using the fluent interface.

        Args:
            default_id_type: one of the values of `astrapy.constants.DefaultIdType`
                (or the equivalent string) to set a default ID type for a
                collection; alternatively, None to remove the corresponding
                configuration.

        Returns:
            a CollectionDefinition obtained by adding (or replacing) the
            desired default ID type setting to this collection definition.
        """
        if default_id_type is None:
            return CollectionDefinition(
                vector=self.vector,
                lexical=self.lexical,
                rerank=self.rerank,
                indexing=self.indexing,
                default_id=None,
            )
        return CollectionDefinition(
            vector=self.vector,
            lexical=self.lexical,
            rerank=self.rerank,
            indexing=self.indexing,
            default_id=CollectionDefaultIDOptions(
                default_id_type=default_id_type,
            ),
        )

    def set_vector_dimension(self, dimension: int | None) -> CollectionDefinition:
        """
        Return a new collection definition object with a new setting
        for the collection's vector dimension.

        This method is for use within the fluent interface for progressively
        building a complete collection definition.

        See the class docstring for a full example on using the fluent interface.

        Args:
            dimension: an integer, the number of components of vectors
                in the collection. Setting even just one vector-related
                property makes the described collection a "vector collection".
                Providing None removes this setting.

        Returns:
            a CollectionDefinition obtained by adding (or replacing) the
            desired vector-related setting to this collection definition.
        """
        _vector_options = self.vector or CollectionVectorOptions()
        return CollectionDefinition(
            vector=CollectionVectorOptions(
                dimension=dimension,
                metric=_vector_options.metric,
                source_model=_vector_options.source_model,
                service=_vector_options.service,
            ),
            lexical=self.lexical,
            rerank=self.rerank,
            indexing=self.indexing,
            default_id=self.default_id,
        )

    def set_vector_metric(self, metric: str | None) -> CollectionDefinition:
        """
        Return a new collection definition object with a new setting
        for the collection's vector similarity metric.

        This method is for use within the fluent interface for progressively
        building a complete collection definition.

        See the class docstring for a full example on using the fluent interface.

        Args:
            metric: a value of those in `astrapy.constants.VectorMetric`,
                or an equivalent string such as "dot_product", used for
                vector search within the collection.
                Setting even just one vector-related property makes the
                described collection a "vector collection".
                Providing None removes this setting.

        Returns:
            a CollectionDefinition obtained by adding (or replacing) the
            desired vector-related setting to this collection definition.
        """
        _vector_options = self.vector or CollectionVectorOptions()
        return CollectionDefinition(
            vector=CollectionVectorOptions(
                dimension=_vector_options.dimension,
                metric=metric,
                source_model=_vector_options.source_model,
                service=_vector_options.service,
            ),
            lexical=self.lexical,
            rerank=self.rerank,
            indexing=self.indexing,
            default_id=self.default_id,
        )

    def set_vector_source_model(self, source_model: str | None) -> CollectionDefinition:
        """
        Return a new collection definition object with a new setting
        for the collection's vector 'source model' parameter.

        This method is for use within the fluent interface for progressively
        building a complete collection definition.

        See the class docstring for a full example on using the fluent interface.

        Args:
            source_model: an optional string setting for the vector index,
                to help it pick the set of parameters best suited to a
                specific embedding model. See the Data API documentation
                for more details.
                Setting even just one vector-related property makes the
                described collection a "vector collection".
                Providing None removes this setting - the Data API will
                use its defaults.

        Returns:
            a CollectionDefinition obtained by adding (or replacing) the
            desired vector-related setting to this collection definition.
        """
        _vector_options = self.vector or CollectionVectorOptions()
        return CollectionDefinition(
            vector=CollectionVectorOptions(
                dimension=_vector_options.dimension,
                metric=_vector_options.metric,
                source_model=source_model,
                service=_vector_options.service,
            ),
            lexical=self.lexical,
            rerank=self.rerank,
            indexing=self.indexing,
            default_id=self.default_id,
        )

    def set_vector_service(
        self,
        provider: str | VectorServiceOptions | None,
        model_name: str | None = None,
        *,
        authentication: dict[str, Any] | None = None,
        parameters: dict[str, Any] | None = None,
    ) -> CollectionDefinition:
        """
        Return a new collection definition object with a new setting
        for the collection's vectorize (i.e. server-side embeddings) service.

        This method is for use within the fluent interface for progressively
        building a complete collection definition.

        See the class docstring for a full example on using the fluent interface.

        Args:
            provider: this can be (1) a whole `VectorServiceOptions` object
                encoding all desired properties for a vectorize service; or
                (2) it can be None, to signify removal of the entire vectorize
                setting; alternatively, (3) it can be a string, the vectorize
                provider name as seen in the response from the database's
                `find_embedding_providers` method. In the latter case, the
                other parameters should also be provided as needed. See the
                examples below for an illustration of these usage patterns.
            model_name: a string, the name of the vectorize model to use
                (must be compatible with the chosen provider).
            authentication: a dictionary with the required authentication
                information if the vectorize makes use of secrets (API Keys)
                stored in the database Key Management System. See the Data API
                for more information on storing an API Key secret in one's
                Astra DB account.
            parameters: a free-form key-value mapping providing additional,
                model-dependent configuration settings. The allowed parameters
                for a given model are specified in the response of the
                Database `find_embedding_providers` method.

        Returns:
            a CollectionDefinition obtained by adding (or replacing) the
            desired vector-related setting to this collection definition.

        Example:
            >>> from astrapy.info import CollectionDefinition, VectorServiceOptions
            >>>
            >>> zero = CollectionDefinition.builder()
            >>>
            >>> svc1 = zero.set_vector_service(
            ...     "myProvider",
            ...     "myModelName",
            ...     parameters={"p": "z"},
            ... )
            >>> print(svc1.build().as_dict())
            {'vector': {'service': {'provider': 'myProvider', 'modelName': 'myModelName', 'parameters': {'p': 'z'}}}}
            >>>
            >>> myVecSvcOpt = VectorServiceOptions(
            ...     provider="myProvider",
            ...     model_name="myModelName",
            ...     parameters={"p": "z"},
            ... )
            >>> svc2 = zero.set_vector_service(myVecSvcOpt).build()
            >>> print(svc2.as_dict())
            {'vector': {'service': {'provider': 'myProvider', 'modelName': 'myModelName', 'parameters': {'p': 'z'}}}}
            >>>
            >>> reset = svc1.set_vector_service(None).build()
            >>> print(reset.as_dict())
            {}
        """
        _vector_options = self.vector or CollectionVectorOptions()
        if isinstance(provider, VectorServiceOptions):
            # A ready-made options object excludes the piecewise parameters.
            if (
                model_name is not None
                or authentication is not None
                or parameters is not None
            ):
                msg = (
                    "Parameters 'model_name', 'authentication' and 'parameters' "
                    "cannot be passed when setting a VectorServiceOptions directly."
                )
                raise ValueError(msg)
            return CollectionDefinition(
                vector=CollectionVectorOptions(
                    dimension=_vector_options.dimension,
                    metric=_vector_options.metric,
                    source_model=_vector_options.source_model,
                    service=provider,
                ),
                lexical=self.lexical,
                rerank=self.rerank,
                indexing=self.indexing,
                default_id=self.default_id,
            )
        else:
            new_service: VectorServiceOptions | None
            if provider is None:
                # Unsetting the service excludes the piecewise parameters too.
                if (
                    model_name is not None
                    or authentication is not None
                    or parameters is not None
                ):
                    msg = (
                        "Parameters 'model_name', 'authentication' and 'parameters' "
                        "cannot be passed when unsetting the vector service."
                    )
                    raise ValueError(msg)
                new_service = None
            else:
                new_service = VectorServiceOptions(
                    provider=provider,
                    model_name=model_name,
                    authentication=authentication,
                    parameters=parameters,
                )
            return CollectionDefinition(
                vector=CollectionVectorOptions(
                    dimension=_vector_options.dimension,
                    metric=_vector_options.metric,
                    source_model=_vector_options.source_model,
                    service=new_service,
                ),
                lexical=self.lexical,
                rerank=self.rerank,
                indexing=self.indexing,
                default_id=self.default_id,
            )

    def set_rerank(
        self,
        provider: str
        | CollectionRerankOptions
        | RerankServiceOptions
        | None
        | UnsetType = _UNSET,
        model_name: str | None = None,
        *,
        authentication: dict[str, Any] | None = None,
        parameters: dict[str, Any] | None = None,
        enabled: bool | None = None,
    ) -> CollectionDefinition:
        """
        Return a new collection definition object with a modified 'rerank'
        setting.

        This method is for use within the fluent interface for progressively
        building a complete collection definition.

        See the class docstring for a full example on using the fluent interface.

        Args:
            provider: this can be (1) a `RerankServiceOptions` object encoding
                all desired properties for a reranking service; (2) a
                `CollectionRerankOptions`, that is likewise being set as the
                collection reranking configuration; or (3) it can be None, to
                unset the rerank setting, altogether from the collection
                definition, hence letting the API use its defaults; (4) it can
                be a string, the reranking provider name as seen in the
                response from the database's `find_rerank_providers` method.
                In the latter case, the other parameters should also be
                provided as needed. If this parameter is omitted, the `enabled`
                parameter must be supplied. See the examples below for an
                illustration of these usage patterns.
            model_name: a string, the name of the reranker model to use
                (must be compatible with the chosen provider).
            authentication: a dictionary with the required authentication
                information if the reranking makes use of secrets (API Keys)
                stored in the database Key Management System. See the Data API
                for more information on storing an API Key secret in one's
                Astra DB account.
            parameters: a free-form key-value mapping providing additional,
                model-dependent configuration settings. The allowed parameters
                for a given model are specified in the response of the
                Database `find_rerank_providers` method.
            enabled: if passed, this flag is used in the reranking definition
                for the collection. If omitted, defaults to True.

        Returns:
            a CollectionDefinition obtained by adding, replacing or unsetting
            the rerank configuration of this collection definition.

        Example:
            >>> from astrapy.info import (
            ...     CollectionDefinition,
            ...     CollectionRerankOptions,
            ...     RerankServiceOptions,
            ... )
            >>>
            >>> zero = CollectionDefinition.builder()
            >>>
            >>> rrk1 = zero.set_rerank(
            ...     "myProvider",
            ...     "myModelName",
            ...     parameters={"p": "z"},
            ... )
            >>> print(rrk1.build().as_dict())
            {'rerank': {'enabled': True, 'service': {'provider': 'myProvider', 'modelName': 'myModelName', 'parameters': {'p': 'z'}}}}
            >>>
            >>> myRrkSvcOpt = RerankServiceOptions(
            ...     provider="myProvider",
            ...     model_name="myModelName",
            ...     parameters={"p": "z"},
            ... )
            >>> rrk2 = zero.set_rerank(myRrkSvcOpt).build()
            >>> print(rrk2.as_dict())
            {'rerank': {'enabled': True, 'service': {'provider': 'myProvider', 'modelName': 'myModelName', 'parameters': {'p': 'z'}}}}
            >>>
            >>> myColRrkOpt = CollectionRerankOptions(
            ...     enabled=True,
            ...     service=myRrkSvcOpt,
            ... )
            >>> rrk3 = zero.set_rerank(myColRrkOpt).build()
            >>> print(rrk3.as_dict())
            {'rerank': {'enabled': True, 'service': {'provider': 'myProvider', 'modelName': 'myModelName', 'parameters': {'p': 'z'}}}}
            >>>
            >>> rrk4 = rrk1.set_rerank(enabled=False).build()
            >>> print(rrk4.as_dict())
            {'rerank': {'enabled': False}}
            >>>
            >>> reset = rrk1.set_rerank(None).build()
            >>> print(reset.as_dict())
            {}
        """
        if isinstance(provider, CollectionRerankOptions):
            if (
                model_name is not None
                or authentication is not None
                or parameters is not None
                or enabled is not None
            ):
                msg = (
                    "Parameters 'model_name', 'authentication', 'parameters' and "
                    "'enabled' cannot be passed when setting a "
                    "CollectionRerankOptions directly."
                )
                raise ValueError(msg)
            return CollectionDefinition(
                vector=self.vector,
                lexical=self.lexical,
                rerank=provider,
                indexing=self.indexing,
                default_id=self.default_id,
            )
        elif isinstance(provider, RerankServiceOptions):
            if (
                model_name is not None
                or authentication is not None
                or parameters is not None
            ):
                msg = (
                    "Parameters 'model_name', 'authentication', 'parameters' "
                    "cannot be passed when setting a RerankServiceOptions directly."
                )
                raise ValueError(msg)
            return CollectionDefinition(
                vector=self.vector,
                lexical=self.lexical,
                rerank=CollectionRerankOptions(
                    enabled=enabled,
                    service=provider,
                ),
                indexing=self.indexing,
                default_id=self.default_id,
            )
        elif provider is None:
            if (
                model_name is not None
                or authentication is not None
                or parameters is not None
                or enabled is not None
            ):
                msg = (
                    "Parameters 'model_name', 'authentication', 'parameters' "
                    "and 'enabled' cannot be passed when unsetting 'rerank'."
                )
                raise ValueError(msg)
            return CollectionDefinition(
                vector=self.vector,
                lexical=self.lexical,
                rerank=None,
                indexing=self.indexing,
                default_id=self.default_id,
            )
        elif isinstance(provider, UnsetType):
            # `provider` omitted entirely: only the `enabled` flag may be set.
            if (
                model_name is not None
                or authentication is not None
                or parameters is not None
            ):
                msg = (
                    "Parameters 'model_name', 'authentication', 'parameters' "
                    "cannot be passed when omitting 'provider'."
                )
                raise ValueError(msg)
            if enabled is None:
                msg = (
                    "At least one of 'provider' and 'enabled' must be passed "
                    "to `set_rerank`."
                )
                raise ValueError(msg)
            return CollectionDefinition(
                vector=self.vector,
                lexical=self.lexical,
                rerank=CollectionRerankOptions(enabled=enabled),
                indexing=self.indexing,
                default_id=self.default_id,
            )
        else:
            # `provider` is a plain provider-name string: assemble the options.
            new_rerank = CollectionRerankOptions(
                enabled=enabled,
                service=RerankServiceOptions(
                    provider=provider,
                    model_name=model_name,
                    authentication=authentication,
                    parameters=parameters,
                ),
            )
            return CollectionDefinition(
                vector=self.vector,
                lexical=self.lexical,
                rerank=new_rerank,
                indexing=self.indexing,
                default_id=self.default_id,
            )

    def set_lexical(
        self,
        analyzer: str | dict[str, Any] | CollectionLexicalOptions | None | UnsetType = _UNSET,
        *,
        enabled: bool | None = None,
    ) -> CollectionDefinition:
        """
        Return a new collection definition object with a modified 'lexical'
        setting.

        This method is for use within the fluent interface for progressively
        building a complete collection definition.

        See the class docstring for a full example on using the fluent interface.

        Args:
            analyzer: this can be (1) a string or free-form dictionary,
                specifying the configuration for the collection analyzer; or
                (2) a ready `CollectionLexicalOptions` object encoding said
                configuration; alternatively (3) None, to unset 'lexical'
                altogether from the collection definition, hence letting the
                API use its defaults. If this parameter is omitted, the
                `enabled` parameter must be supplied. See the examples below
                for an illustration of these usage patterns.
            enabled: if passed, this flag is used in the lexical definition
                for the collection. If omitted, defaults to True.

        Returns:
            a CollectionDefinition obtained by adding, replacing or unsetting
            the lexical configuration of this collection definition.

        Example:
            >>> from astrapy.info import CollectionDefinition, CollectionLexicalOptions
            >>>
            >>> zero = CollectionDefinition.builder()
            >>>
            >>> anz1 = zero.set_lexical(
            ...     "analyzer_setting",
            ... )
            >>> print(anz1.build().as_dict())
            {'lexical': {'enabled': True, 'analyzer': 'analyzer_setting'}}
            >>> myLexOpt = CollectionLexicalOptions(analyzer="analyzer_setting")
            >>> anz2 = zero.set_lexical(myLexOpt).build()
            >>> print(anz2.as_dict())
            {'lexical': {'enabled': True, 'analyzer': 'analyzer_setting'}}
            >>> reset = anz1.set_lexical(None).build()
            >>> print(reset.as_dict())
            {}
            >>> anz3 = zero.set_lexical(enabled=False).build()
            >>> print(anz3.as_dict())
            {'lexical': {'enabled': False}}
        """
        if analyzer is None:
            if enabled is not None:
                msg = "Parameter 'enabled' cannot be passed when unsetting 'lexical'."
                raise ValueError(msg)
            return CollectionDefinition(
                vector=self.vector,
                lexical=None,
                rerank=self.rerank,
                indexing=self.indexing,
                default_id=self.default_id,
            )
        elif isinstance(analyzer, CollectionLexicalOptions):
            if enabled is not None:
                msg = (
                    "Parameter 'enabled' cannot be passed when setting 'lexical' "
                    "through a CollectionLexicalOptions object."
                )
                raise ValueError(msg)
            return CollectionDefinition(
                vector=self.vector,
                lexical=analyzer,
                rerank=self.rerank,
                indexing=self.indexing,
                default_id=self.default_id,
            )
        elif isinstance(analyzer, UnsetType):
            if enabled is None:
                msg = (
                    "At least one of 'enabled' and 'analyzer' must be passed "
                    "to set_lexical."
                )
                raise ValueError(msg)
            return CollectionDefinition(
                vector=self.vector,
                lexical=CollectionLexicalOptions(enabled=enabled),
                rerank=self.rerank,
                indexing=self.indexing,
                default_id=self.default_id,
            )
        else:
            # `analyzer` is a plain string/dict configuration.
            new_lexical = CollectionLexicalOptions(
                enabled=enabled,
                analyzer=analyzer,
            )
            return CollectionDefinition(
                vector=self.vector,
                lexical=new_lexical,
                rerank=self.rerank,
                indexing=self.indexing,
                default_id=self.default_id,
            )

    def build(self) -> CollectionDefinition:
        """
        The final step in the fluent (builder) interface. Calling this method
        finalizes the definition that has been built so far and makes it into
        a collection definition ready for use in e.g. table creation.

        Note that this step may be automatically invoked by the receiving
        methods: however it is a good practice - and also adds to the
        readability of the code - to call it explicitly.

        See the class docstring for a full example on using the fluent interface.

        Returns:
            a CollectionDefinition obtained by finalizing the definition
            being built so far.
        """
        return self
Instances of this object can be created in three ways: using a fluent interface, passing a fully-formed definition to the class constructor, or coercing an appropriately-shaped plain dictionary into this class. See the examples below and the Table documentation for more details.
Attributes
vector- an optional CollectionVectorOptions object.
lexical- A
CollectionLexicalOptionsobject encoding the desired "lexical" settings. If omitted, the Data API defaults apply. rerank- A
CollectionRerankOptionsobject encoding the desired "rerank" settings. If omitted, the Data API defaults apply. indexing- an optional dictionary with the "indexing" collection properties.
This is in the form of a dictionary such as
{"deny": [...]}or{"allow": [...]}, with a list of document paths, or alternatively just["*"], to exclude from/include in collection indexing, respectively. default_id- an optional CollectionDefaultIDOptions object (see).
Example
>>> from astrapy.constants import VectorMetric >>> from astrapy.info import CollectionDefinition, CollectionVectorOptions >>> >>> # Create a collection definition with the fluent interface: >>> collection_definition = ( ... CollectionDefinition.builder() ... .set_vector_dimension(3) ... .set_vector_metric(VectorMetric.DOT_PRODUCT) ... .set_indexing("deny", ["annotations", "logs"]) ... .build() ... ) >>> >>> # Create a collection definition passing everything to the constructor: >>> collection_definition_1 = CollectionDefinition( ... vector=CollectionVectorOptions( ... dimension=3, ... metric=VectorMetric.DOT_PRODUCT, ... ), ... indexing={"deny": ["annotations", "logs"]}, ... ) >>> >>> # Coerce a dictionary into a collection definition: >>> collection_definition_2_dict = { ... "indexing": {"deny": ["annotations", "logs"]}, ... "vector": { ... "dimension": 3, ... "metric": VectorMetric.DOT_PRODUCT, ... }, ... } >>> collection_definition_2 = CollectionDefinition.coerce( ... collection_definition_2_dict ... ) >>> >>> # The three created objects are exactly identical: >>> collection_definition_2 == collection_definition_1 True >>> collection_definition_2 == collection_definition TrueStatic methods
def builder() ‑> CollectionDefinition-
Expand source code
@staticmethod def builder() -> CollectionDefinition: """ Create an "empty" builder for constructing a collection definition through a fluent interface. The resulting object has no defined properties, traits that can be added progressively with the corresponding methods. See the class docstring for a full example on using the fluent interface. Returns: a CollectionDefinition for the simplest possible creatable collection. """ return CollectionDefinition()Create an "empty" builder for constructing a collection definition through a fluent interface. The resulting object has no defined properties, traits that can be added progressively with the corresponding methods.
See the class docstring for a full example on using the fluent interface.
Returns
a CollectionDefinition for the simplest possible creatable collection.
def coerce(raw_input: CollectionDefinition | dict[str, Any]) ‑> CollectionDefinition-
Normalize the input, whether an object already or a plain dictionary of the right structure, into a CollectionDefinition.
Instance variables
var default_id : CollectionDefaultIDOptions | None-
An optional CollectionDefaultIDOptions object configuring the default ID type for the collection; None if unset.
var indexing : dict[str, typing.Any] | None-
An optional dictionary with the "indexing" collection properties, such as {"deny": [...]} or {"allow": [...]}; None if unset.
var lexical : CollectionLexicalOptions | None-
An optional CollectionLexicalOptions object encoding the collection "lexical" settings; None if unset.
var rerank : CollectionRerankOptions | None-
An optional CollectionRerankOptions object encoding the collection "rerank" settings; None if unset.
var vector : CollectionVectorOptions | None-
An optional CollectionVectorOptions object encoding the collection vector-related settings; None if unset.
Methods
def as_dict(self) ‑> dict[str, typing.Any]-
Expand source code
def as_dict(self) -> dict[str, Any]: """Recast this object into a dictionary.""" return { k: v for k, v in { "vector": None if self.vector is None else self.vector.as_dict(), "lexical": None if self.lexical is None else self.lexical.as_dict(), "rerank": None if self.rerank is None else self.rerank.as_dict(), "indexing": self.indexing, "defaultId": ( None if self.default_id is None else self.default_id.as_dict() ), }.items() if v is not None if v != {} }Recast this object into a dictionary.
def build(self) ‑> CollectionDefinition-
Expand source code
def build(self) -> CollectionDefinition: """ The final step in the fluent (builder) interface. Calling this method finalizes the definition that has been built so far and makes it into a collection definition ready for use in e.g. table creation. Note that this step may be automatically invoked by the receiving methods: however it is a good practice - and also adds to the readability of the code - to call it explicitly. See the class docstring for a full example on using the fluent interface. Returns: a CollectionDefinition obtained by finalizing the definition being built so far. """ return selfThe final step in the fluent (builder) interface. Calling this method finalizes the definition that has been built so far and makes it into a collection definition ready for use in e.g. table creation.
Note that this step may be automatically invoked by the receiving methods: however it is a good practice - and also adds to the readability of the code - to call it explicitly.
See the class docstring for a full example on using the fluent interface.
Returns
a CollectionDefinition obtained by finalizing the definition being built so far.
def set_default_id(self, default_id_type: str | None) ‑> CollectionDefinition-
Expand source code
def set_default_id(self, default_id_type: str | None) -> CollectionDefinition: """ Return a new collection definition object with a new setting for the collection 'default ID type'. This method is for use within the fluent interface for progressively building a complete collection definition. See the class docstring for a full example on using the fluent interface. Args: default_id_type: one of the values of `astrapy.constants.DefaultIdType` (or the equivalent string) to set a default ID type for a collection; alternatively, None to remove the corresponding configuration. Returns: a CollectionDefinition obtained by adding (or replacing) the desired default ID type setting to this collection definition. """ if default_id_type is None: return CollectionDefinition( vector=self.vector, lexical=self.lexical, rerank=self.rerank, indexing=self.indexing, default_id=None, ) return CollectionDefinition( vector=self.vector, lexical=self.lexical, rerank=self.rerank, indexing=self.indexing, default_id=CollectionDefaultIDOptions( default_id_type=default_id_type, ), )Return a new collection definition object with a new setting for the collection 'default ID type'. This method is for use within the fluent interface for progressively building a complete collection definition.
See the class docstring for a full example on using the fluent interface.
Args
default_id_type - one of the values of astrapy.constants.DefaultIdType (or the equivalent string) to set a default ID type for a collection; alternatively, None to remove the corresponding configuration.
Returns
a CollectionDefinition obtained by adding (or replacing) the desired default ID type setting to this collection definition.
def set_indexing(self, indexing_mode: str | None, indexing_target: list[str] | None = None) ‑> CollectionDefinition-
Expand source code
def set_indexing( self, indexing_mode: str | None, indexing_target: list[str] | None = None ) -> CollectionDefinition: """ Return a new collection definition object with a new indexing setting. The indexing can be set to something (fully overwriting any pre-existing configuration), or removed entirely. This method is for use within the fluent interface for progressively building a complete collection definition. See the class docstring for a full example on using the fluent interface. Args: indexing_mode: one of "allow" or "deny" to configure indexing, or None in case one wants to remove the setting. indexing_target: a list of the document paths covered by the allow/deny prescription. Passing this parameter when `indexing_mode` is None results in an error. Returns: a CollectionDefinition obtained by adding (or replacing) the desired indexing setting to this collection definition. """ if indexing_mode is None: if indexing_target is not None: raise ValueError("Cannot pass an indexing target if unsetting indexing") return CollectionDefinition( vector=self.vector, lexical=self.lexical, rerank=self.rerank, indexing=None, default_id=self.default_id, ) _i_mode = indexing_mode.lower() if _i_mode not in INDEXING_ALLOWED_MODES: msg = ( f"Unknown indexing mode: '{indexing_mode}'. " f"Allowed values are: {', '.join(INDEXING_ALLOWED_MODES)}." ) raise ValueError(msg) _i_target: list[str] = indexing_target or [] return CollectionDefinition( vector=self.vector, lexical=self.lexical, rerank=self.rerank, indexing={indexing_mode: indexing_target}, default_id=self.default_id, )Return a new collection definition object with a new indexing setting. The indexing can be set to something (fully overwriting any pre-existing configuration), or removed entirely. This method is for use within the fluent interface for progressively building a complete collection definition.
See the class docstring for a full example on using the fluent interface.
Args
indexing_mode - one of "allow" or "deny" to configure indexing, or None in case one wants to remove the setting.
indexing_target - a list of the document paths covered by the allow/deny
prescription. Passing this parameter when
indexing_mode is None results in an error.
Returns
a CollectionDefinition obtained by adding (or replacing) the desired indexing setting to this collection definition.
def set_lexical(self,
analyzer: str | dict[str, Any] | CollectionLexicalOptions | None | UnsetType = (unset),
*,
enabled: bool | None = None) ‑> CollectionDefinition-
Expand source code
def set_lexical( self, analyzer: str | dict[str, Any] | CollectionLexicalOptions | None | UnsetType = _UNSET, *, enabled: bool | None = None, ) -> CollectionDefinition: """ Return a new collection definition object with a modified 'lexical' setting. This method is for use within the fluent interface for progressively building a complete collection definition. See the class docstring for a full example on using the fluent interface. Args: analyzer: this can be (1) a string or free-form dictionary, specifying the configuration for the collection analyzer; or (2) a ready `CollectionLexicalOptions` object encoding said configuration; alternatively (3) None, to unset 'lexical' altogether from the collection definition, hence letting the API use its defaults. If this parameter is omitted, the `enabled` parameter must be supplied. See the examples below for an illustration of these usage patterns. enabled: if passed, this flag is used in the lexical definition for the collection. If omitted, defaults to True. Returns: a CollectionDefinition obtained by adding, replacing or unsetting the lexical configuration of this collection definition. Example: >>> from astrapy.info import CollectionDefinition, CollectionLexicalOptions >>> >>> zero = CollectionDefinition.builder() >>> >>> anz1 = zero.set_lexical( ... "analyzer_setting", ... ) >>> print(anz1.build().as_dict()) {'lexical': {'enabled': True, 'analyzer': 'analyzer_setting'}} >>> myLexOpt = CollectionLexicalOptions(analyzer="analyzer_setting") >>> anz2 = zero.set_lexical(myLexOpt).build() >>> print(anz2.as_dict()) {'lexical': {'enabled': True, 'analyzer': 'analyzer_setting'}} >>> reset = anz1.set_lexical(None).build() >>> print(reset.as_dict()) {} >>> anz3 = zero.set_lexical(enabled=False).build() >>> print(anz3.as_dict()) {'lexical': {'enabled': False}} """ if analyzer is None: if enabled is not None: msg = "Parameter 'enabled' cannot be passed when unsetting 'lexical'." 
raise ValueError(msg) return CollectionDefinition( vector=self.vector, lexical=None, rerank=self.rerank, indexing=self.indexing, default_id=self.default_id, ) elif isinstance(analyzer, CollectionLexicalOptions): if enabled is not None: msg = ( "Parameter 'enabled' cannot be passed when setting 'lexical' " "through a CollectionLexicalOptions object." ) raise ValueError(msg) return CollectionDefinition( vector=self.vector, lexical=analyzer, rerank=self.rerank, indexing=self.indexing, default_id=self.default_id, ) elif isinstance(analyzer, UnsetType): if enabled is None: msg = ( "At least one of 'enabled' and 'analyzer' must be passed " "to set_lexical." ) raise ValueError(msg) return CollectionDefinition( vector=self.vector, lexical=CollectionLexicalOptions(enabled=enabled), rerank=self.rerank, indexing=self.indexing, default_id=self.default_id, ) else: new_lexical = CollectionLexicalOptions( enabled=enabled, analyzer=analyzer, ) return CollectionDefinition( vector=self.vector, lexical=new_lexical, rerank=self.rerank, indexing=self.indexing, default_id=self.default_id, )Return a new collection definition object with a modified 'lexical' setting.
This method is for use within the fluent interface for progressively building a complete collection definition.
See the class docstring for a full example on using the fluent interface.
Args
analyzer - this can be (1) a string or free-form dictionary, specifying
the configuration for the collection analyzer; or (2) a ready
CollectionLexicalOptions object encoding said configuration; alternatively (3) None, to unset 'lexical' altogether from the collection definition, hence letting the API use its defaults. If this parameter is omitted, the enabled parameter must be supplied. See the examples below for an illustration of these usage patterns.
enabled - if passed, this flag is used in the lexical definition for the collection. If omitted, defaults to True.
Returns
a CollectionDefinition obtained by adding, replacing or unsetting the lexical configuration of this collection definition.
Example
>>> from astrapy.info import CollectionDefinition, CollectionLexicalOptions >>> >>> zero = CollectionDefinition.builder() >>> >>> anz1 = zero.set_lexical( ... "analyzer_setting", ... ) >>> print(anz1.build().as_dict()) {'lexical': {'enabled': True, 'analyzer': 'analyzer_setting'}} >>> myLexOpt = CollectionLexicalOptions(analyzer="analyzer_setting") >>> anz2 = zero.set_lexical(myLexOpt).build() >>> print(anz2.as_dict()) {'lexical': {'enabled': True, 'analyzer': 'analyzer_setting'}} >>> reset = anz1.set_lexical(None).build() >>> print(reset.as_dict()) {} >>> anz3 = zero.set_lexical(enabled=False).build() >>> print(anz3.as_dict()) {'lexical': {'enabled': False}} def set_rerank(self,
provider: str | CollectionRerankOptions | RerankServiceOptions | None | UnsetType = (unset),
model_name: str | None = None,
*,
authentication: dict[str, Any] | None = None,
parameters: dict[str, Any] | None = None,
enabled: bool | None = None) ‑> CollectionDefinition-
Expand source code
def set_rerank( self, provider: str | CollectionRerankOptions | RerankServiceOptions | None | UnsetType = _UNSET, model_name: str | None = None, *, authentication: dict[str, Any] | None = None, parameters: dict[str, Any] | None = None, enabled: bool | None = None, ) -> CollectionDefinition: """ Return a new collection definition object with a modified 'rerank' setting. This method is for use within the fluent interface for progressively building a complete collection definition. See the class docstring for a full example on using the fluent interface. Args: provider: this can be (1) a `RerankServiceOptions` object encoding all desired properties for a reranking service; (2) a `CollectionRerankOptions`, that is likewise being set as the collection reranking configuration; or (3) it can be None, to unset the rerank setting, altogether from the collection definition, hence letting the API use its defaults; (4) it can be a string, the reranking provider name as seen in the response from the database's `find_rerank_providers` method. In the latter case, the other parameters should also be provided as needed. If this parameter is omitted, the `enabled` parameter must be supplied. See the examples below for an illustration of these usage patterns. model_name: a string, the name of the reranker model to use (must be compatible with the chosen provider). authentication: a dictionary with the required authentication information if the reranking makes use of secrets (API Keys) stored in the database Key Management System. See the Data API for more information on storing an API Key secret in one's Astra DB account. parameters: a free-form key-value mapping providing additional, model-dependent configuration settings. The allowed parameters for a given model are specified in the response of the Database `find_rerank_providers` method. enabled: if passed, this flag is used in the reranking definition for the collection. If omitted, defaults to True. 
Returns: a CollectionDefinition obtained by adding, replacing or unsetting the rerank configuration of this collection definition. Example: >>> from astrapy.info import ( ... CollectionDefinition, ... CollectionRerankOptions, ... RerankServiceOptions, ... ) >>> >>> zero = CollectionDefinition.builder() >>> >>> rrk1 = zero.set_rerank( ... "myProvider", ... "myModelName", ... parameters={"p": "z"}, ... ) >>> print(rrk1.build().as_dict()) {'rerank': {'enabled': True, 'service': {'provider': 'myProvider', 'modelName': 'myModelName', 'parameters': {'p': 'z'}}}} >>> >>> myRrkSvcOpt = RerankServiceOptions( ... provider="myProvider", ... model_name="myModelName", ... parameters={"p": "z"}, ... ) >>> rrk2 = zero.set_rerank(myRrkSvcOpt).build() >>> print(rrk2.as_dict()) {'rerank': {'enabled': True, 'service': {'provider': 'myProvider', 'modelName': 'myModelName', 'parameters': {'p': 'z'}}}} >>> >>> myColRrkOpt = CollectionRerankOptions( ... enabled=True, ... service=myRrkSvcOpt, ... ) >>> rrk3 = zero.set_rerank(myColRrkOpt).build() >>> print(rrk3.as_dict()) {'rerank': {'enabled': True, 'service': {'provider': 'myProvider', 'modelName': 'myModelName', 'parameters': {'p': 'z'}}}} >>> >>> rrk4 = rrk1.set_rerank(enabled=False).build() >>> print(rrk4.as_dict()) {'rerank': {'enabled': False}} >>> >>> reset = rrk1.set_rerank(None).build() >>> print(reset.as_dict()) {} """ if isinstance(provider, CollectionRerankOptions): if ( model_name is not None or authentication is not None or parameters is not None or enabled is not None ): msg = ( "Parameters 'model_name', 'authentication', 'parameters' and " "'enabled' cannot be passed when setting a " "CollectionRerankOptions directly." 
) raise ValueError(msg) return CollectionDefinition( vector=self.vector, lexical=self.lexical, rerank=provider, indexing=self.indexing, default_id=self.default_id, ) elif isinstance(provider, RerankServiceOptions): if ( model_name is not None or authentication is not None or parameters is not None ): msg = ( "Parameters 'model_name', 'authentication', 'parameters' " "cannot be passed when setting a RerankServiceOptions directly." ) raise ValueError(msg) return CollectionDefinition( vector=self.vector, lexical=self.lexical, rerank=CollectionRerankOptions( enabled=enabled, service=provider, ), indexing=self.indexing, default_id=self.default_id, ) elif provider is None: if ( model_name is not None or authentication is not None or parameters is not None or enabled is not None ): msg = ( "Parameters 'model_name', 'authentication', 'parameters' " "and 'enabled' cannot be passed when unsetting 'rerank'." ) raise ValueError(msg) return CollectionDefinition( vector=self.vector, lexical=self.lexical, rerank=None, indexing=self.indexing, default_id=self.default_id, ) elif isinstance(provider, UnsetType): if ( model_name is not None or authentication is not None or parameters is not None ): msg = ( "Parameters 'model_name', 'authentication', 'parameters' " "cannot be passed when omitting 'provider'." ) raise ValueError(msg) if enabled is None: msg = ( "At least one of 'provider' and 'enabled' must be passed " "to `set_rerank`." 
) raise ValueError(msg) return CollectionDefinition( vector=self.vector, lexical=self.lexical, rerank=CollectionRerankOptions(enabled=enabled), indexing=self.indexing, default_id=self.default_id, ) else: new_service = CollectionRerankOptions( enabled=enabled, service=RerankServiceOptions( provider=provider, model_name=model_name, authentication=authentication, parameters=parameters, ), ) return CollectionDefinition( vector=self.vector, lexical=self.lexical, rerank=new_service, indexing=self.indexing, default_id=self.default_id, )Return a new collection definition object with a modified 'rerank' setting.
This method is for use within the fluent interface for progressively building a complete collection definition.
See the class docstring for a full example on using the fluent interface.
Args
provider - this can be (1) a
RerankServiceOptions object encoding all desired properties for a reranking service; (2) a CollectionRerankOptions, that is likewise being set as the collection reranking configuration; or (3) it can be None, to unset the rerank setting altogether from the collection definition, hence letting the API use its defaults; (4) it can be a string, the reranking provider name as seen in the response from the database's find_rerank_providers method. In the latter case, the other parameters should also be provided as needed. If this parameter is omitted, the enabled parameter must be supplied. See the examples below for an illustration of these usage patterns.
model_name - a string, the name of the reranker model to use (must be compatible with the chosen provider).
authentication - a dictionary with the required authentication information if the reranking makes use of secrets (API Keys) stored in the database Key Management System. See the Data API for more information on storing an API Key secret in one's Astra DB account.
parameters - a free-form key-value mapping providing additional,
model-dependent configuration settings. The allowed parameters for
a given model are specified in the response of the Database
find_rerank_providers method.
enabled - if passed, this flag is used in the reranking definition for the collection. If omitted, defaults to True.
Returns
a CollectionDefinition obtained by adding, replacing or unsetting the rerank configuration of this collection definition.
Example
>>> from astrapy.info import ( ... CollectionDefinition, ... CollectionRerankOptions, ... RerankServiceOptions, ... ) >>> >>> zero = CollectionDefinition.builder() >>> >>> rrk1 = zero.set_rerank( ... "myProvider", ... "myModelName", ... parameters={"p": "z"}, ... ) >>> print(rrk1.build().as_dict()) {'rerank': {'enabled': True, 'service': {'provider': 'myProvider', 'modelName': 'myModelName', 'parameters': {'p': 'z'}}}} >>> >>> myRrkSvcOpt = RerankServiceOptions( ... provider="myProvider", ... model_name="myModelName", ... parameters={"p": "z"}, ... ) >>> rrk2 = zero.set_rerank(myRrkSvcOpt).build() >>> print(rrk2.as_dict()) {'rerank': {'enabled': True, 'service': {'provider': 'myProvider', 'modelName': 'myModelName', 'parameters': {'p': 'z'}}}} >>> >>> myColRrkOpt = CollectionRerankOptions( ... enabled=True, ... service=myRrkSvcOpt, ... ) >>> rrk3 = zero.set_rerank(myColRrkOpt).build() >>> print(rrk3.as_dict()) {'rerank': {'enabled': True, 'service': {'provider': 'myProvider', 'modelName': 'myModelName', 'parameters': {'p': 'z'}}}} >>> >>> rrk4 = rrk1.set_rerank(enabled=False).build() >>> print(rrk4.as_dict()) {'rerank': {'enabled': False}} >>> >>> reset = rrk1.set_rerank(None).build() >>> print(reset.as_dict()) {} def set_vector_dimension(self, dimension: int | None) ‑> CollectionDefinition-
Expand source code
def set_vector_dimension(self, dimension: int | None) -> CollectionDefinition: """ Return a new collection definition object with a new setting for the collection's vector dimension. This method is for use within the fluent interface for progressively building a complete collection definition. See the class docstring for a full example on using the fluent interface. Args: dimension: an integer, the number of components of vectors in the collection. Setting even just one vector-related property makes the described collection a "vector collection". Providing None removes this setting. Returns: a CollectionDefinition obtained by adding (or replacing) the desired vector-related setting to this collection definition. """ _vector_options = self.vector or CollectionVectorOptions() return CollectionDefinition( vector=CollectionVectorOptions( dimension=dimension, metric=_vector_options.metric, source_model=_vector_options.source_model, service=_vector_options.service, ), lexical=self.lexical, rerank=self.rerank, indexing=self.indexing, default_id=self.default_id, )Return a new collection definition object with a new setting for the collection's vector dimension. This method is for use within the fluent interface for progressively building a complete collection definition.
See the class docstring for a full example on using the fluent interface.
Args
dimension - an integer, the number of components of vectors in the collection. Setting even just one vector-related property makes the described collection a "vector collection". Providing None removes this setting.
Returns
a CollectionDefinition obtained by adding (or replacing) the desired vector-related setting to this collection definition.
def set_vector_metric(self, metric: str | None) ‑> CollectionDefinition-
Expand source code
def set_vector_metric(self, metric: str | None) -> CollectionDefinition: """ Return a new collection definition object with a new setting for the collection's vector similarity metric. This method is for use within the fluent interface for progressively building a complete collection definition. See the class docstring for a full example on using the fluent interface. Args: metric: a value of those in `astrapy.constants.VectorMetric`, or an equivalent string such as "dot_product", used for vector search within the collection. Setting even just one vector-related property makes the described collection a "vector collection". Providing None removes this setting. Returns: a CollectionDefinition obtained by adding (or replacing) the desired vector-related setting to this collection definition. """ _vector_options = self.vector or CollectionVectorOptions() return CollectionDefinition( vector=CollectionVectorOptions( dimension=_vector_options.dimension, metric=metric, source_model=_vector_options.source_model, service=_vector_options.service, ), lexical=self.lexical, rerank=self.rerank, indexing=self.indexing, default_id=self.default_id, )Return a new collection definition object with a new setting for the collection's vector similarity metric. This method is for use within the fluent interface for progressively building a complete collection definition.
See the class docstring for a full example on using the fluent interface.
Args
metric - a value of those in astrapy.constants.VectorMetric, or an equivalent string such as "dot_product", used for vector search within the collection. Setting even just one vector-related property makes the described collection a "vector collection". Providing None removes this setting.
Returns
a CollectionDefinition obtained by adding (or replacing) the desired vector-related setting to this collection definition.
def set_vector_service(self,
provider: str | VectorServiceOptions | None,
model_name: str | None = None,
*,
authentication: dict[str, Any] | None = None,
parameters: dict[str, Any] | None = None) ‑> CollectionDefinition-
Expand source code
def set_vector_service( self, provider: str | VectorServiceOptions | None, model_name: str | None = None, *, authentication: dict[str, Any] | None = None, parameters: dict[str, Any] | None = None, ) -> CollectionDefinition: """ Return a new collection definition object with a new setting for the collection's vectorize (i.e. server-side embeddings) service. This method is for use within the fluent interface for progressively building a complete collection definition. See the class docstring for a full example on using the fluent interface. Args: provider: this can be (1) a whole `VectorServiceOptions` object encoding all desired properties for a vectorize service; or (2) it can be None, to signify removal of the entire vectorize setting; alternatively, (3) it can be a string, the vectorize provider name as seen in the response from the database's `find_embedding_providers` method. In the latter case, the other parameters should also be provided as needed. See the examples below for an illustration of these usage patterns. model_name: a string, the name of the vectorize model to use (must be compatible with the chosen provider). authentication: a dictionary with the required authentication information if the vectorize makes use of secrets (API Keys) stored in the database Key Management System. See the Data API for more information on storing an API Key secret in one's Astra DB account. parameters: a free-form key-value mapping providing additional, model-dependent configuration settings. The allowed parameters for a given model are specified in the response of the Database `find_embedding_providers` method. Returns: a CollectionDefinition obtained by adding (or replacing) the desired vector-related setting to this collection definition. Example: >>> from astrapy.info import CollectionDefinition, VectorServiceOptions >>> >>> zero = CollectionDefinition.builder() >>> >>> svc1 = zero.set_vector_service( ... "myProvider", ... "myModelName", ... 
parameters={"p": "z"}, ... ) >>> print(svc1.build().as_dict()) {'vector': {'service': {'provider': 'myProvider', 'modelName': 'myModelName', 'parameters': {'p': 'z'}}}} >>> >>> myVecSvcOpt = VectorServiceOptions( ... provider="myProvider", ... model_name="myModelName", ... parameters={"p": "z"}, ... ) >>> svc2 = zero.set_vector_service(myVecSvcOpt).build() >>> print(svc2.as_dict()) {'vector': {'service': {'provider': 'myProvider', 'modelName': 'myModelName', 'parameters': {'p': 'z'}}}} >>> >>> reset = svc1.set_vector_service(None).build() >>> print(reset.as_dict()) {} """ _vector_options = self.vector or CollectionVectorOptions() if isinstance(provider, VectorServiceOptions): if ( model_name is not None or authentication is not None or parameters is not None ): msg = ( "Parameters 'model_name', 'authentication' and 'parameters' " "cannot be passed when setting a VectorServiceOptions directly." ) raise ValueError(msg) return CollectionDefinition( vector=CollectionVectorOptions( dimension=_vector_options.dimension, metric=_vector_options.metric, source_model=_vector_options.source_model, service=provider, ), lexical=self.lexical, rerank=self.rerank, indexing=self.indexing, default_id=self.default_id, ) else: new_service: VectorServiceOptions | None if provider is None: if ( model_name is not None or authentication is not None or parameters is not None ): msg = ( "Parameters 'model_name', 'authentication' and 'parameters' " "cannot be passed when unsetting the vector service." 
) raise ValueError(msg) new_service = None else: new_service = VectorServiceOptions( provider=provider, model_name=model_name, authentication=authentication, parameters=parameters, ) return CollectionDefinition( vector=CollectionVectorOptions( dimension=_vector_options.dimension, metric=_vector_options.metric, source_model=_vector_options.source_model, service=new_service, ), lexical=self.lexical, rerank=self.rerank, indexing=self.indexing, default_id=self.default_id, )Return a new collection definition object with a new setting for the collection's vectorize (i.e. server-side embeddings) service. This method is for use within the fluent interface for progressively building a complete collection definition.
See the class docstring for a full example on using the fluent interface.
Args
provider - this can be (1) a whole
VectorServiceOptions object encoding all desired properties for a vectorize service; or (2) it can be None, to signify removal of the entire vectorize setting; alternatively, (3) it can be a string, the vectorize provider name as seen in the response from the database's find_embedding_providers method. In the latter case, the other parameters should also be provided as needed. See the examples below for an illustration of these usage patterns.
model_name - a string, the name of the vectorize model to use (must be compatible with the chosen provider).
authentication - a dictionary with the required authentication information if the vectorize makes use of secrets (API Keys) stored in the database Key Management System. See the Data API for more information on storing an API Key secret in one's Astra DB account.
parameters - a free-form key-value mapping providing additional,
model-dependent configuration settings. The allowed parameters for
a given model are specified in the response of the Database
find_embedding_providers method.
Returns
a CollectionDefinition obtained by adding (or replacing) the desired vector-related setting to this collection definition.
Example
>>> from astrapy.info import CollectionDefinition, VectorServiceOptions >>> >>> zero = CollectionDefinition.builder() >>> >>> svc1 = zero.set_vector_service( ... "myProvider", ... "myModelName", ... parameters={"p": "z"}, ... ) >>> print(svc1.build().as_dict()) {'vector': {'service': {'provider': 'myProvider', 'modelName': 'myModelName', 'parameters': {'p': 'z'}}}} >>> >>> myVecSvcOpt = VectorServiceOptions( ... provider="myProvider", ... model_name="myModelName", ... parameters={"p": "z"}, ... ) >>> svc2 = zero.set_vector_service(myVecSvcOpt).build() >>> print(svc2.as_dict()) {'vector': {'service': {'provider': 'myProvider', 'modelName': 'myModelName', 'parameters': {'p': 'z'}}}} >>> >>> reset = svc1.set_vector_service(None).build() >>> print(reset.as_dict()) {} def set_vector_source_model(self, source_model: str | None) ‑> CollectionDefinition-
Expand source code
def set_vector_source_model(self, source_model: str | None) -> CollectionDefinition:
    """
    Return a new collection definition object with a new setting for the
    collection's vector 'source model' parameter.
    This method is for use within the fluent interface for progressively
    building a complete collection definition.

    See the class docstring for a full example on using the fluent interface.

    Args:
        source_model: an optional string setting for the vector index, to help
            it pick the set of parameters best suited to a specific embedding
            model. See the Data API documentation for more details.
            Setting even just one vector-related property makes the described
            collection a "vector collection".
            Providing None removes this setting - the Data API will use
            its defaults.

    Returns:
        a CollectionDefinition obtained by adding (or replacing) the desired
        vector-related setting to this collection definition.
    """
    # Start from the current vector options (a blank set if none were given
    # so far), then clone them with only `source_model` replaced.
    base_vector = CollectionVectorOptions() if self.vector is None else self.vector
    updated_vector = CollectionVectorOptions(
        dimension=base_vector.dimension,
        metric=base_vector.metric,
        source_model=source_model,
        service=base_vector.service,
    )
    # Every non-vector setting is carried over unchanged.
    return CollectionDefinition(
        vector=updated_vector,
        lexical=self.lexical,
        rerank=self.rerank,
        indexing=self.indexing,
        default_id=self.default_id,
    )
See the class docstring for a full example on using the fluent interface.
Args
source_model- an optional string setting for the vector index, to help it pick the set of parameters best suited to a specific embedding model. See the Data API documentation for more details. Setting even just one vector-related property makes the described collection a "vector collection". Providing None removes this setting - the Data API will use its defaults.
Returns
a CollectionDefinition obtained by adding (or replacing) the desired vector-related setting to this collection definition.
class CollectionDescriptor (name: str,
definition: CollectionDefinition,
raw_descriptor: dict[str, Any] | None)-
Expand source code
@dataclass
class CollectionDescriptor:
    """
    A full description of a collection as the Data API returns it: the
    collection's name together with its definition.

    Attributes:
        name: the name of the collection.
        definition: a CollectionDefinition instance.
        raw_descriptor: the raw response from the Data API.

    Note: although the API format has the collection settings in a field
        called "options" (both in payloads and in responses, consistently),
        the corresponding attribute of this object is called `definition`
        to keep consistency with the TableDescriptor class and the
        attribute's data type (`CollectionDefinition`). As a consequence,
        when coercing a plain dictionary into this class, care must be
        taken that the plain dictionary key be "options", as could a
        response from the API have it.
    """

    name: str
    definition: CollectionDefinition
    raw_descriptor: dict[str, Any] | None

    def __repr__(self) -> str:
        pieces = [f"name={self.name!r}", f"definition={self.definition!r}"]
        # The raw descriptor is elided from the repr; only its presence is shown.
        if self.raw_descriptor is not None:
            pieces.append("raw_descriptor=...")
        return f"{self.__class__.__name__}({', '.join(pieces)})"

    def __eq__(self, other: Any) -> bool:
        # Equality deliberately ignores `raw_descriptor`.
        if not isinstance(other, CollectionDescriptor):
            return False
        return self.name == other.name and self.definition == other.definition

    def as_dict(self) -> dict[str, Any]:
        """
        Recast this object into a dictionary.
        Empty `definition` will not be returned at all.
        """
        result: dict[str, Any] = {}
        if self.name:
            result["name"] = self.name
        options_dict = self.definition.as_dict()
        if options_dict:
            result["options"] = options_dict
        return result

    @classmethod
    def _from_dict(cls, raw_dict: dict[str, Any]) -> CollectionDescriptor:
        """
        Create an instance of CollectionDescriptor from a dictionary
        such as one from the Data API.
        """
        _warn_residual_keys(cls, raw_dict, {"name", "options"})
        return CollectionDescriptor(
            name=raw_dict["name"],
            definition=CollectionDefinition._from_dict(raw_dict.get("options") or {}),
            raw_descriptor=raw_dict,
        )

    @classmethod
    def coerce(
        cls, raw_input: CollectionDescriptor | dict[str, Any]
    ) -> CollectionDescriptor:
        """
        Normalize the input, whether an object already or a plain dictionary
        of the right structure, into a CollectionDescriptor.
        """
        if isinstance(raw_input, CollectionDescriptor):
            return raw_input
        return cls._from_dict(raw_input)
Attributes
name- the name of the collection.
definition- a CollectionDefinition instance.
raw_descriptor- the raw response from the Data API.
Note
although the API format has the collection settings in a field called "options" (both in payloads and in responses, consistently), the corresponding attribute of this object is called
`definition` to keep consistency with the TableDescriptor class and the attribute's data type (`CollectionDefinition`). As a consequence, when coercing a plain dictionary into this class, care must be taken that the plain dictionary key be "options", just as a response from the API would have it.
Static methods
def coerce(raw_input: CollectionDescriptor | dict[str, Any]) ‑> CollectionDescriptor-
Normalize the input, whether an object already or a plain dictionary of the right structure, into a CollectionDescriptor.
Instance variables
var definition : CollectionDefinition-
The CollectionDefinition object describing the collection's configuration.
var name : str-
The name of the collection.
var raw_descriptor : dict[str, typing.Any] | None-
The raw response from the Data API, if available.
Methods
def as_dict(self) ‑> dict[str, typing.Any]-
Expand source code
def as_dict(self) -> dict[str, Any]:
    """
    Recast this object into a dictionary.
    Empty `definition` will not be returned at all.
    """
    result: dict[str, Any] = {}
    if self.name:
        result["name"] = self.name
    # An empty definition serializes to an empty dict and is omitted entirely.
    options_dict = self.definition.as_dict()
    if options_dict:
        result["options"] = options_dict
    return result
`definition` will not be returned at all.
class CollectionInfo (database_info: AstraDBDatabaseInfo,
keyspace: str,
name: str,
full_name: str)-
Expand source code
@dataclass
class CollectionInfo:
    """
    Represents the identifying information for a collection, including
    the information about the database the collection belongs to.

    Attributes:
        database_info: an AstraDBDatabaseInfo instance for the underlying database.
        keyspace: the keyspace where the collection is located.
        name: collection name. Unique within a keyspace (across tables/collections).
        full_name: identifier for the collection within the database,
            in the form "keyspace.collection_name".
    """

    # Description of the database hosting this collection.
    database_info: AstraDBDatabaseInfo
    # Keyspace containing the collection.
    keyspace: str
    # Collection name (unique within the keyspace, across tables and collections).
    name: str
    # Fully-qualified "keyspace.collection_name" identifier.
    full_name: str
Attributes
database_info- an AstraDBDatabaseInfo instance for the underlying database.
keyspace- the keyspace where the collection is located.
name- collection name. Unique within a keyspace (across tables/collections).
full_name- identifier for the collection within the database, in the form "keyspace.collection_name".
Instance variables
var database_info : AstraDBDatabaseInfo-
An AstraDBDatabaseInfo instance for the underlying database.
var full_name : str-
Identifier for the collection within the database, in the form "keyspace.collection_name".
var keyspace : str-
The keyspace where the collection is located.
var name : str-
The collection name, unique within a keyspace (across tables and collections).
class CollectionLexicalOptions (*, enabled: bool | None = None, analyzer: str | dict[str, Any] | None = None)-
Expand source code
@dataclass
class CollectionLexicalOptions:
    """
    The "lexical" component of the collection options.
    See the Data API specifications for allowed values.

    Attributes:
        enabled: use this flag to programmatically set 'lexical' to on/off.
        analyzer: either a string (e.g. "standard") or a full dictionary
            specifying a more customized configuration for the text analyzer.
            See the Data API documentation for more on the dictionary form.
    """

    enabled: bool
    analyzer: str | dict[str, Any] | None

    def __init__(
        self,
        *,
        enabled: bool | None = None,
        analyzer: str | dict[str, Any] | None = None,
    ) -> None:
        # An unspecified `enabled` means the feature is on.
        self.enabled = enabled if enabled is not None else True
        self.analyzer = analyzer

    def as_dict(self) -> dict[str, Any]:
        """Recast this object into a dictionary."""
        result: dict[str, Any] = {}
        if self.enabled is not None:
            result["enabled"] = self.enabled
        if self.analyzer is not None:
            result["analyzer"] = self.analyzer
        return result

    @staticmethod
    def _from_dict(raw_dict: dict[str, Any] | None) -> CollectionLexicalOptions | None:
        """
        Create an instance of CollectionLexicalOptions from a dictionary
        such as one from the Data API.
        """
        if raw_dict is None:
            return None
        return CollectionLexicalOptions(
            enabled=raw_dict.get("enabled"),
            analyzer=raw_dict.get("analyzer"),
        )
Attributes
enabled- use this flag to programmatically set 'lexical' to on/off.
analyzer- either a string (e.g. "standard") or a full dictionary specifying a more customized configuration for the text analyzer. See the Data API documentation for more on the dictionary form.
Instance variables
var analyzer : str | dict[str, typing.Any] | None-
The text-analyzer setting: a preset name (e.g. "standard") or a full configuration dictionary.
var enabled : bool-
Whether the 'lexical' feature is enabled for the collection.
Methods
def as_dict(self) ‑> dict[str, typing.Any]-
Expand source code
def as_dict(self) -> dict[str, Any]:
    """Recast this object into a dictionary."""
    # Keys whose value is None are omitted from the payload.
    result: dict[str, Any] = {}
    if self.enabled is not None:
        result["enabled"] = self.enabled
    if self.analyzer is not None:
        result["analyzer"] = self.analyzer
    return result
class CollectionRerankOptions (*,
enabled: bool | None = None,
service: RerankServiceOptions | None = None)-
Expand source code
@dataclass
class CollectionRerankOptions:
    """
    The "rerank" component of the collection options.
    See the Data API specifications for allowed values.

    Attributes:
        enabled: use this flag to programmatically set 'rerank' to on/off.
        service: A `RerankServiceOptions` object describing the desired reranker.
    """

    enabled: bool
    service: RerankServiceOptions | None

    def __init__(
        self,
        *,
        enabled: bool | None = None,
        service: RerankServiceOptions | None = None,
    ) -> None:
        # An unspecified `enabled` means the feature is on.
        self.enabled = enabled if enabled is not None else True
        self.service = service

    def as_dict(self) -> dict[str, Any]:
        """Recast this object into a dictionary."""
        serialized_service = None if self.service is None else self.service.as_dict()
        result: dict[str, Any] = {}
        if self.enabled is not None:
            result["enabled"] = self.enabled
        if serialized_service is not None:
            result["service"] = serialized_service
        return result

    @staticmethod
    def _from_dict(
        raw_dict: dict[str, Any] | None,
    ) -> CollectionRerankOptions | None:
        """
        Create an instance of CollectionRerankOptions from a dictionary
        such as one from the Data API.
        """
        if raw_dict is None:
            return None
        return CollectionRerankOptions(
            enabled=raw_dict.get("enabled"),
            service=RerankServiceOptions._from_dict(raw_dict.get("service")),
        )
Attributes
enabled- use this flag to programmatically set 'rerank' to on/off.
service- A
RerankServiceOptionsobject describing the desired reranker.
Instance variables
var enabled : bool-
Whether the 'rerank' feature is enabled for the collection.
var service : RerankServiceOptions | None-
The RerankServiceOptions object describing the desired reranker, if any.
Methods
def as_dict(self) ‑> dict[str, typing.Any]-
Expand source code
def as_dict(self) -> dict[str, Any]:
    """Recast this object into a dictionary."""
    # A missing service serializes to None and is omitted, as is a None `enabled`.
    serialized_service = None if self.service is None else self.service.as_dict()
    result: dict[str, Any] = {}
    if self.enabled is not None:
        result["enabled"] = self.enabled
    if serialized_service is not None:
        result["service"] = serialized_service
    return result
class CollectionVectorOptions (dimension: int | None = None,
metric: str | None = None,
source_model: str | None = None,
service: VectorServiceOptions | None = None)-
Expand source code
@dataclass
class CollectionVectorOptions:
    """
    The "vector" component of the collection options.
    See the Data API specifications for allowed values.

    Attributes:
        dimension: an optional positive integer, the dimensionality
            of the vector space (i.e. the number of components in each vector).
        metric: an optional choice of similarity metric to use in vector search.
            It must be a (string) value among `VectorMetric.DOT_PRODUCT`,
            `VectorMetric.EUCLIDEAN` and `VectorMetric.COSINE`.
        source_model: based on this value, the vector index can tune itself
            so as to achieve optimal performance for a given embedding model.
            See the Data API documentation for the allowed values.
            Defaults to "other".
        service: an optional VectorServiceOptions object in case a vectorize
            service is configured to achieve server-side embedding computation
            on the collection.
    """

    dimension: int | None = None
    metric: str | None = None
    source_model: str | None = None
    service: VectorServiceOptions | None = None

    def as_dict(self) -> dict[str, Any]:
        """Recast this object into a dictionary."""
        # Keys with a None value are omitted; note the camelCase API spelling
        # of `source_model`.
        serialized_service = None if self.service is None else self.service.as_dict()
        result: dict[str, Any] = {}
        if self.dimension is not None:
            result["dimension"] = self.dimension
        if self.metric is not None:
            result["metric"] = self.metric
        if serialized_service is not None:
            result["service"] = serialized_service
        if self.source_model is not None:
            result["sourceModel"] = self.source_model
        return result

    @staticmethod
    def _from_dict(raw_dict: dict[str, Any] | None) -> CollectionVectorOptions | None:
        """
        Create an instance of CollectionVectorOptions from a dictionary
        such as one from the Data API.
        """
        if raw_dict is None:
            return None
        return CollectionVectorOptions(
            dimension=raw_dict.get("dimension"),
            metric=raw_dict.get("metric"),
            source_model=raw_dict.get("sourceModel"),
            service=VectorServiceOptions._from_dict(raw_dict.get("service")),
        )
Attributes
dimension- an optional positive integer, the dimensionality of the vector space (i.e. the number of components in each vector).
metric- an optional choice of similarity metric to use in vector search.
It must be a (string) value among
VectorMetric.DOT_PRODUCT,VectorMetric.EUCLIDEANandVectorMetric.COSINE. source_model- based on this value, the vector index can tune itself so as to achieve optimal performance for a given embedding model. See the Data API documentation for the allowed values. Defaults to "other".
service- an optional VectorServiceOptions object in case a vectorize service is configured to achieve server-side embedding computation on the collection.
Instance variables
var dimension : int | None-
The dimensionality of the vector space, if set.
var metric : str | None-
The similarity metric used in vector search, if set.
var service : VectorServiceOptions | None-
The vectorize (server-side embedding) service configuration, if any.
var source_model : str | None-
The source-model setting used to tune the vector index, if set.
Methods
def as_dict(self) ‑> dict[str, typing.Any]-
Expand source code
def as_dict(self) -> dict[str, Any]:
    """Recast this object into a dictionary."""
    # Keys with a None value are omitted; note the camelCase API spelling
    # of `source_model`.
    serialized_service = None if self.service is None else self.service.as_dict()
    result: dict[str, Any] = {}
    if self.dimension is not None:
        result["dimension"] = self.dimension
    if self.metric is not None:
        result["metric"] = self.metric
    if serialized_service is not None:
        result["service"] = serialized_service
    if self.source_model is not None:
        result["sourceModel"] = self.source_model
    return result
class ColumnType (*args, **kwds)-
Expand source code
class ColumnType(StrEnum):
    """
    Enum to describe the scalar column types for Tables.

    A 'scalar' type is a non-composite type: that means, no sets, lists, maps
    and other non-primitive data types.

    Each member's value is the lowercase type name as the Data API spells it
    (e.g. `ColumnType.BIGINT` has value "bigint"); the names mirror the
    corresponding CQL scalar types.
    """

    ASCII = "ascii"  # ASCII character string
    BIGINT = "bigint"  # 64-bit signed integer
    BLOB = "blob"  # arbitrary bytes
    BOOLEAN = "boolean"  # true/false
    COUNTER = "counter"  # distributed counter
    DATE = "date"  # calendar date, no time component
    DECIMAL = "decimal"  # arbitrary-precision decimal
    DOUBLE = "double"  # 64-bit IEEE-754 float
    DURATION = "duration"  # time span
    FLOAT = "float"  # 32-bit IEEE-754 float
    INET = "inet"  # IP address (v4 or v6)
    INT = "int"  # 32-bit signed integer
    SMALLINT = "smallint"  # 16-bit signed integer
    TEXT = "text"  # UTF-8 string
    TIME = "time"  # time of day
    TIMESTAMP = "timestamp"  # date+time instant
    TIMEUUID = "timeuuid"  # time-based (version 1) UUID
    TINYINT = "tinyint"  # 8-bit signed integer
    UUID = "uuid"  # UUID
    VARINT = "varint"  # arbitrary-precision integer
A 'scalar' type is a non-composite type: that means, no sets, lists, maps and other non-primitive data types.
Ancestors
- StrEnum
- enum.Enum
Class variables
var ASCII-
The "ascii" scalar column type.
var BIGINT-
The "bigint" scalar column type.
var BLOB-
The "blob" scalar column type.
var BOOLEAN-
The "boolean" scalar column type.
var COUNTER-
The "counter" scalar column type.
var DATE-
The "date" scalar column type.
var DECIMAL-
The "decimal" scalar column type.
var DOUBLE-
The "double" scalar column type.
var DURATION-
The "duration" scalar column type.
var FLOAT-
The "float" scalar column type.
var INET-
The "inet" scalar column type.
var INT-
The "int" scalar column type.
var SMALLINT-
The "smallint" scalar column type.
var TEXT-
The "text" scalar column type.
var TIME-
The "time" scalar column type.
var TIMESTAMP-
The "timestamp" scalar column type.
var TIMEUUID-
The "timeuuid" scalar column type.
var TINYINT-
The "tinyint" scalar column type.
var UUID-
The "uuid" scalar column type.
var VARINT-
The "varint" scalar column type.
Inherited members
class CreateTableDefinition (columns: dict[str, TableColumnTypeDescriptor],
primary_key: TablePrimaryKeyDescriptor)-
Expand source code
@dataclass
class CreateTableDefinition:
    """
    A structure expressing the definition ("schema") of a table to be created
    through the Data API. This object is passed as the `definition` parameter
    to the database `create_table` method.

    See the Data API specifications for detailed specification and allowed values.

    Instances of this object can be created in three ways: using a fluent interface,
    passing a fully-formed definition to the class constructor, or coercing an
    appropriately-shaped plain dictionary into this class.

    Attributes:
        columns: a map from column names to their type definition object.
        primary_key: a specification of the primary key for the table.

    Examples:
        >>> from astrapy.constants import SortMode
        >>> from astrapy.info import (
        ...     CreateTableDefinition,
        ...     TablePrimaryKeyDescriptor,
        ...     ColumnType,
        ...     TableScalarColumnTypeDescriptor,
        ...     TableValuedColumnType,
        ...     TableValuedColumnTypeDescriptor,
        ...     TableVectorColumnTypeDescriptor,
        ... )
        >>>
        >>> # Create a table definition with the fluent interface:
        >>> table_definition = (
        ...     CreateTableDefinition.builder()
        ...     .add_column("match_id", ColumnType.TEXT)
        ...     .add_column("round", ColumnType.INT)
        ...     .add_vector_column("m_vector", dimension=3)
        ...     .add_column("score", ColumnType.INT)
        ...     .add_column("when", ColumnType.TIMESTAMP)
        ...     .add_column("winner", ColumnType.TEXT)
        ...     .add_set_column("fighters", ColumnType.UUID)
        ...     .add_partition_by(["match_id"])
        ...     .add_partition_sort({"round": SortMode.ASCENDING})
        ...     .build()
        ... )
        >>>
        >>> # Create a table definition passing everything to the constructor:
        >>> table_definition_1 = CreateTableDefinition(
        ...     columns={
        ...         "match_id": TableScalarColumnTypeDescriptor(
        ...             ColumnType.TEXT,
        ...         ),
        ...         "round": TableScalarColumnTypeDescriptor(
        ...             ColumnType.INT,
        ...         ),
        ...         "m_vector": TableVectorColumnTypeDescriptor(
        ...             column_type="vector", dimension=3
        ...         ),
        ...         "score": TableScalarColumnTypeDescriptor(
        ...             ColumnType.INT,
        ...         ),
        ...         "when": TableScalarColumnTypeDescriptor(
        ...             ColumnType.TIMESTAMP,
        ...         ),
        ...         "winner": TableScalarColumnTypeDescriptor(
        ...             ColumnType.TEXT,
        ...         ),
        ...         "fighters": TableValuedColumnTypeDescriptor(
        ...             column_type=TableValuedColumnType.SET,
        ...             value_type=ColumnType.UUID,
        ...         ),
        ...     },
        ...     primary_key=TablePrimaryKeyDescriptor(
        ...         partition_by=["match_id"],
        ...         partition_sort={"round": SortMode.ASCENDING},
        ...     ),
        ... )
        >>>
        >>> # Coerce a dictionary into a table definition:
        >>> table_definition_2_dict = {
        ...     "columns": {
        ...         "match_id": {"type": "text"},
        ...         "round": {"type": "int"},
        ...         "m_vector": {"type": "vector", "dimension": 3},
        ...         "score": {"type": "int"},
        ...         "when": {"type": "timestamp"},
        ...         "winner": {"type": "text"},
        ...         "fighters": {"type": "set", "valueType": "uuid"},
        ...     },
        ...     "primaryKey": {
        ...         "partitionBy": ["match_id"],
        ...         "partitionSort": {"round": 1},
        ...     },
        ... }
        >>> table_definition_2 = CreateTableDefinition.coerce(
        ...     table_definition_2_dict
        ... )
        >>>
        >>> # The three created objects are exactly identical:
        >>> table_definition_2 == table_definition_1
        True
        >>> table_definition_2 == table_definition
        True

        >>> # Assume there is a user-defined type (UDT) called "my_udt" (see
        >>> # `CreateTypeDefinition` and database `create_type` method for details).
        >>> # The expressions below result in the exact same table definition:
        >>> from astrapy.info import (
        ...     ColumnType,
        ...     CreateTableDefinition,
        ...     TablePrimaryKeyDescriptor,
        ...     TableScalarColumnTypeDescriptor,
        ...     TableUDTColumnDescriptor,
        ...     TableValuedColumnType,
        ...     TableValuedColumnTypeDescriptor,
        ... )
        >>>
        >>> udt_tabledefinition = (
        ...     CreateTableDefinition.builder()
        ...     .add_scalar_column("id", "text")
        ...     .add_userdefinedtype_column("udt_col", udt_name="my_udt")
        ...     .add_set_column(
        ...         "set_udt_col",
        ...         value_type={"type": "userDefined", "udtName": "my_udt"},
        ...     )
        ...     .add_partition_by(["id"])
        ...     .build()
        ... )
        >>>
        >>> udt_tabledefinition_1 = CreateTableDefinition(
        ...     columns={
        ...         "id": TableScalarColumnTypeDescriptor(ColumnType.TEXT),
        ...         "udt_col": TableUDTColumnDescriptor(
        ...             udt_name="my_udt",
        ...         ),
        ...         "set_udt_col": TableValuedColumnTypeDescriptor(
        ...             column_type=TableValuedColumnType.SET,
        ...             value_type=TableUDTColumnDescriptor(
        ...                 udt_name="my_udt",
        ...             ),
        ...         ),
        ...     },
        ...     primary_key=TablePrimaryKeyDescriptor(
        ...         partition_by=["id"],
        ...         partition_sort={},
        ...     )
        ... )
        >>>
        >>> udt_tabledefinition_2 = CreateTableDefinition.coerce(
        ...     {
        ...         "columns": {
        ...             "id": {
        ...                 "type": "text",
        ...             },
        ...             "udt_col": {
        ...                 "type": "userDefined",
        ...                 "udtName": "my_udt",
        ...             },
        ...             "set_udt_col": {
        ...                 "type": "set",
        ...                 "valueType": {
        ...                     "type": "userDefined",
        ...                     "udtName": "my_udt",
        ...                 },
        ...             },
        ...         },
        ...         "primaryKey": {
        ...             "partitionBy": [
        ...                 "id",
        ...             ],
        ...             "partitionSort": {},
        ...         },
        ...     },
        ... )
        >>>
        >>> # The three created objects are exactly identical:
        >>> udt_tabledefinition_2 == udt_tabledefinition_1
        True
        >>> udt_tabledefinition_2 == udt_tabledefinition
        True
    """

    columns: dict[str, TableColumnTypeDescriptor]
    primary_key: TablePrimaryKeyDescriptor

    def __repr__(self) -> str:
        not_null_pieces = [
            pc
            for pc in [
                f"columns=[{','.join(self.columns.keys())}]",
                f"primary_key={self.primary_key}",
            ]
            if pc is not None
        ]
        return f"{self.__class__.__name__}({', '.join(not_null_pieces)})"

    def as_dict(self) -> dict[str, Any]:
        """Recast this object into a dictionary."""
        # None-valued entries are dropped to keep the API payload minimal
        return {
            k: v
            for k, v in {
                "columns": {
                    col_n: col_v.as_spec() for col_n, col_v in self.columns.items()
                },
                "primaryKey": self.primary_key.as_dict(),
            }.items()
            if v is not None
        }

    @classmethod
    def _from_dict(cls, raw_dict: dict[str, Any]) -> CreateTableDefinition:
        """
        Create an instance of CreateTableDefinition from a dictionary
        such as one from the Data API.
        """
        _warn_residual_keys(cls, raw_dict, {"columns", "primaryKey"})
        return CreateTableDefinition(
            columns={
                col_n: TableColumnTypeDescriptor.coerce(col_v)
                for col_n, col_v in raw_dict["columns"].items()
            },
            primary_key=TablePrimaryKeyDescriptor.coerce(raw_dict["primaryKey"]),
        )

    @classmethod
    def coerce(
        cls, raw_input: CreateTableDefinition | dict[str, Any]
    ) -> CreateTableDefinition:
        """
        Normalize the input, whether an object already or a plain
        dictionary of the right structure, into a CreateTableDefinition.
        """
        if isinstance(raw_input, CreateTableDefinition):
            return raw_input
        else:
            return cls._from_dict(raw_input)

    @staticmethod
    def builder() -> CreateTableDefinition:
        """
        Create an "empty" builder for constructing a table definition through
        a fluent interface. The resulting object has no columns and no primary key,
        traits that are to be added progressively with the corresponding methods.

        Since it describes a "table with no columns at all", the result of this
        method alone is not an acceptable table definition for running a table
        creation method on a Database.

        See the class docstring for a full example on using the fluent interface.

        Returns:
            a CreateTableDefinition formally describing a table with no columns.
        """
        return CreateTableDefinition(
            columns={},
            primary_key=TablePrimaryKeyDescriptor(
                partition_by=[],
                partition_sort={},
            ),
        )

    def add_scalar_column(
        self, column_name: str, column_type: str | ColumnType
    ) -> CreateTableDefinition:
        """
        Return a new table definition object with an added column
        of a scalar type (i.e. not a list, set or other composite type).
        This method is for use within the fluent interface for progressively
        building a complete table definition.

        See the class docstring for a full example on using the fluent interface.

        Args:
            column_name: the name of the new column to add to the definition.
            column_type: a string, or a `ColumnType` value, defining
                the scalar type for the column.

        Returns:
            a CreateTableDefinition obtained by adding (or replacing) the desired
            column to this table definition.
        """
        # a new definition is returned; this object is not mutated
        return CreateTableDefinition(
            columns={
                **self.columns,
                **{
                    column_name: TableScalarColumnTypeDescriptor(
                        column_type=ColumnType.coerce(column_type)
                    )
                },
            },
            primary_key=self.primary_key,
        )

    def add_column(
        self, column_name: str, column_type: str | ColumnType
    ) -> CreateTableDefinition:
        """
        Return a new table definition object with an added column
        of a scalar type (i.e. not a list, set or other composite type).
        This method is for use within the fluent interface for progressively
        building a complete table definition.

        This method is an alias for `add_scalar_column`.

        See the class docstring for a full example on using the fluent interface.

        Args:
            column_name: the name of the new column to add to the definition.
            column_type: a string, or a `ColumnType` value, defining
                the scalar type for the column.

        Returns:
            a CreateTableDefinition obtained by adding (or replacing) the desired
            column to this table definition.
        """
        return self.add_scalar_column(column_name=column_name, column_type=column_type)

    def add_set_column(
        self,
        column_name: str,
        value_type: str | dict[Any, str] | ColumnType | TableColumnTypeDescriptor,
    ) -> CreateTableDefinition:
        """
        Return a new table definition object with an added column of 'set' type.
        This method is for use within the fluent interface for progressively
        building a complete table definition.

        See the class docstring for a full example on using the fluent interface.

        Args:
            column_name: the name of the new column to add to the definition.
            value_type: the type of the individual items stored in the set.
                This is a `TableColumnTypeDescriptor`, but when creating the object,
                equivalent dictionaries, as well as strings such as "TEXT" or "UUID"
                or ColumnType entries, are also accepted.

        Returns:
            a CreateTableDefinition obtained by adding (or replacing) the desired
            column to this table definition.
        """
        return CreateTableDefinition(
            columns={
                **self.columns,
                **{
                    column_name: TableValuedColumnTypeDescriptor(
                        column_type=TableValuedColumnType.SET, value_type=value_type
                    )
                },
            },
            primary_key=self.primary_key,
        )

    def add_list_column(
        self,
        column_name: str,
        value_type: str | dict[Any, str] | ColumnType | TableColumnTypeDescriptor,
    ) -> CreateTableDefinition:
        """
        Return a new table definition object with an added column of 'list' type.
        This method is for use within the fluent interface for progressively
        building a complete table definition.

        See the class docstring for a full example on using the fluent interface.

        Args:
            column_name: the name of the new column to add to the definition.
            value_type: the type of the individual items stored in the list.
                This is a `TableColumnTypeDescriptor`, but when creating the object,
                equivalent dictionaries, as well as strings such as "TEXT" or "UUID"
                or ColumnType entries, are also accepted.

        Returns:
            a CreateTableDefinition obtained by adding (or replacing) the desired
            column to this table definition.
        """
        return CreateTableDefinition(
            columns={
                **self.columns,
                **{
                    column_name: TableValuedColumnTypeDescriptor(
                        column_type=TableValuedColumnType.LIST, value_type=value_type
                    )
                },
            },
            primary_key=self.primary_key,
        )

    def add_map_column(
        self,
        column_name: str,
        key_type: str | dict[Any, str] | ColumnType | TableColumnTypeDescriptor,
        value_type: str | dict[Any, str] | ColumnType | TableColumnTypeDescriptor,
    ) -> CreateTableDefinition:
        """
        Return a new table definition object with an added column of 'map' type.
        This method is for use within the fluent interface for progressively
        building a complete table definition.

        See the class docstring for a full example on using the fluent interface.

        Args:
            column_name: the name of the new column to add to the definition.
            key_type: the type of the individual keys in the map column.
                This is a `TableColumnTypeDescriptor`, but when creating the object,
                equivalent dictionaries, as well as strings such as "TEXT" or "UUID"
                or ColumnType entries, are also accepted.
                Using a column type not eligible to be a key will return
                a Data API error.
            value_type: the type of the individual items stored in the column.
                This is a `TableColumnTypeDescriptor`, but when creating the object,
                equivalent dictionaries, as well as strings such as "TEXT" or "UUID"
                or ColumnType entries, are also accepted.

        Returns:
            a CreateTableDefinition obtained by adding (or replacing) the desired
            column to this table definition.
        """
        return CreateTableDefinition(
            columns={
                **self.columns,
                **{
                    column_name: TableKeyValuedColumnTypeDescriptor(
                        column_type=TableKeyValuedColumnType.MAP,
                        key_type=key_type,
                        value_type=value_type,
                    )
                },
            },
            primary_key=self.primary_key,
        )

    def add_vector_column(
        self,
        column_name: str,
        *,
        dimension: int | None = None,
        service: VectorServiceOptions | dict[str, Any] | None = None,
    ) -> CreateTableDefinition:
        """
        Return a new table definition object with an added column of 'vector' type.
        This method is for use within the fluent interface for progressively
        building a complete table definition.

        See the class docstring for a full example on using the fluent interface.

        Args:
            column_name: the name of the new column to add to the definition.
            dimension: the dimensionality of the vector, i.e. the number of components
                each vector in this column will have. If a `service` parameter is
                supplied and the vectorize model allows for it, the dimension may be
                left unspecified to have the API set a default value.
                The Data API will raise an error if a table creation is attempted with
                a vector column for which neither a service nor the dimension are given.
            service: a `VectorServiceOptions` object, or an equivalent plain dictionary,
                defining the server-side embedding service associated to the column,
                if desired.

        Returns:
            a CreateTableDefinition obtained by adding (or replacing) the desired
            column to this table definition.
        """
        return CreateTableDefinition(
            columns={
                **self.columns,
                **{
                    column_name: TableVectorColumnTypeDescriptor(
                        column_type=TableVectorColumnType.VECTOR,
                        dimension=dimension,
                        service=VectorServiceOptions.coerce(service),
                    )
                },
            },
            primary_key=self.primary_key,
        )

    def add_userdefinedtype_column(
        self,
        column_name: str,
        udt_name: str,
    ) -> CreateTableDefinition:
        """
        Return a new table definition object with an added column of
        'user defined' type (UDT).
        This method is for use within the fluent interface for progressively
        building a complete table definition.

        See the class docstring for a full example on using the fluent interface.

        Args:
            column_name: the name of the new column to add to the definition.
            udt_name: the name of the user-defined type for this column.

        Returns:
            a CreateTableDefinition obtained by adding (or replacing) the desired
            column to this table definition.
        """
        return CreateTableDefinition(
            columns={
                **self.columns,
                **{
                    column_name: TableUDTColumnDescriptor(
                        column_type=TableUDTColumnType.USERDEFINED,
                        udt_name=udt_name,
                    )
                },
            },
            primary_key=self.primary_key,
        )

    def add_partition_by(
        self, partition_columns: list[str] | str
    ) -> CreateTableDefinition:
        """
        Return a new table definition object with one or more added `partition_by`
        columns. This method is for use within the fluent interface for progressively
        building a complete table definition.

        See the class docstring for a full example on using the fluent interface.

        Successive calls append the requested columns at the end of the pre-existing
        `partition_by` list. In other words, these two patterns are equivalent:
            (1) X.add_partition_by(["col1", "col2"])
            (2) X.add_partition_by(["col1"]).add_partition_by("col2")

        Note that no deduplication is applied to the overall result: the caller
        should take care of not supplying the same column name more than once.

        Args:
            partition_columns: a list of column names (strings) to be added to
                the full table partition key. A single string (not a list) is
                also accepted.

        Returns:
            a CreateTableDefinition obtained by enriching the `partition_by`
            of this table definition as requested.
        """
        # normalize a bare string to a one-element list before appending
        _partition_columns = (
            partition_columns
            if isinstance(partition_columns, list)
            else [partition_columns]
        )
        return CreateTableDefinition(
            columns=self.columns,
            primary_key=TablePrimaryKeyDescriptor(
                partition_by=self.primary_key.partition_by + _partition_columns,
                partition_sort=self.primary_key.partition_sort,
            ),
        )

    def add_partition_sort(
        self, partition_sort: dict[str, int]
    ) -> CreateTableDefinition:
        """
        Return a new table definition object with one or more added `partition_sort`
        column specifications. This method is for use within the fluent interface for
        progressively building a complete table definition.

        See the class docstring for a full example on using the fluent interface.

        Successive calls append (or replace) the requested columns at the end of the
        pre-existing `partition_sort` dictionary. In other words, these two patterns
        are equivalent:
            (1) X.add_partition_sort({"c1": 1, "c2": -1})
            (2) X.add_partition_sort({"c1": 1}).add_partition_sort({"c2": -1})

        Args:
            partition_sort: a dictionary mapping column names to their sort mode
                (ascending/descending, i.e. 1/-1. See also
                `astrapy.constants.SortMode`).

        Returns:
            a CreateTableDefinition obtained by enriching the `partition_sort`
            of this table definition as requested.
        """
        return CreateTableDefinition(
            columns=self.columns,
            primary_key=TablePrimaryKeyDescriptor(
                partition_by=self.primary_key.partition_by,
                partition_sort={**self.primary_key.partition_sort, **partition_sort},
            ),
        )

    def build(self) -> CreateTableDefinition:
        """
        The final step in the fluent (builder) interface. Calling this method
        finalizes the definition that has been built so far and makes it into a
        table definition ready for use in e.g. table creation.

        Note that this step may be automatically invoked by the receiving methods:
        however it is a good practice - and also adds to the readability of the code -
        to call it explicitly.

        See the class docstring for a full example on using the fluent interface.

        Returns:
            a CreateTableDefinition obtained by finalizing the definition being
            built so far.
        """
        return self
definitionparameter to the databasecreate_tablemethod.See the Data API specifications for detailed specification and allowed values.
Instances of this object can be created in three ways: using a fluent interface, passing a fully-formed definition to the class constructor, or coercing an appropriately-shaped plain dictionary into this class.
Attributes
columns- a map from column names to their type definition object.
primary_key- a specification of the primary key for the table.
Examples
>>> from astrapy.constants import SortMode >>> from astrapy.info import ( ... CreateTableDefinition, ... TablePrimaryKeyDescriptor, ... ColumnType, ... TableScalarColumnTypeDescriptor, ... TableValuedColumnType, ... TableValuedColumnTypeDescriptor, ... TableVectorColumnTypeDescriptor, ... ) >>> >>> # Create a table definition with the fluent interface: >>> table_definition = ( ... CreateTableDefinition.builder() ... .add_column("match_id", ColumnType.TEXT) ... .add_column("round", ColumnType.INT) ... .add_vector_column("m_vector", dimension=3) ... .add_column("score", ColumnType.INT) ... .add_column("when", ColumnType.TIMESTAMP) ... .add_column("winner", ColumnType.TEXT) ... .add_set_column("fighters", ColumnType.UUID) ... .add_partition_by(["match_id"]) ... .add_partition_sort({"round": SortMode.ASCENDING}) ... .build() ... ) >>> >>> # Create a table definition passing everything to the constructor: >>> table_definition_1 = CreateTableDefinition( ... columns={ ... "match_id": TableScalarColumnTypeDescriptor( ... ColumnType.TEXT, ... ), ... "round": TableScalarColumnTypeDescriptor( ... ColumnType.INT, ... ), ... "m_vector": TableVectorColumnTypeDescriptor( ... column_type="vector", dimension=3 ... ), ... "score": TableScalarColumnTypeDescriptor( ... ColumnType.INT, ... ), ... "when": TableScalarColumnTypeDescriptor( ... ColumnType.TIMESTAMP, ... ), ... "winner": TableScalarColumnTypeDescriptor( ... ColumnType.TEXT, ... ), ... "fighters": TableValuedColumnTypeDescriptor( ... column_type=TableValuedColumnType.SET, ... value_type=ColumnType.UUID, ... ), ... }, ... primary_key=TablePrimaryKeyDescriptor( ... partition_by=["match_id"], ... partition_sort={"round": SortMode.ASCENDING}, ... ), ... ) >>> >>> # Coerce a dictionary into a table definition: >>> table_definition_2_dict = { ... "columns": { ... "match_id": {"type": "text"}, ... "round": {"type": "int"}, ... "m_vector": {"type": "vector", "dimension": 3}, ... "score": {"type": "int"}, ... 
"when": {"type": "timestamp"}, ... "winner": {"type": "text"}, ... "fighters": {"type": "set", "valueType": "uuid"}, ... }, ... "primaryKey": { ... "partitionBy": ["match_id"], ... "partitionSort": {"round": 1}, ... }, ... } >>> table_definition_2 = CreateTableDefinition.coerce( ... table_definition_2_dict ... ) >>> >>> # The three created objects are exactly identical: >>> table_definition_2 == table_definition_1 True >>> table_definition_2 == table_definition True>>> # Assume there is a user-defined type (UDT) called "my_udt" (see >>> # <code><a title="astrapy.info.CreateTypeDefinition" href="#astrapy.info.CreateTypeDefinition">CreateTypeDefinition</a></code> and database <code>create\_type</code> method for details). >>> # The expressions below result in the exact same table definition: >>> from astrapy.info import ( ... ColumnType, ... CreateTableDefinition, ... TablePrimaryKeyDescriptor, ... TableScalarColumnTypeDescriptor, ... TableUDTColumnDescriptor, ... TableValuedColumnType, ... TableValuedColumnTypeDescriptor, ... ) >>> >>> udt_tabledefinition = ( ... CreateTableDefinition.builder() ... .add_scalar_column("id", "text") ... .add_userdefinedtype_column("udt_col", udt_name="my_udt") ... .add_set_column( ... "set_udt_col", ... value_type={"type": "userDefined", "udtName": "my_udt"}, ... ) ... .add_partition_by(["id"]) ... .build() ... ) >>> >>> udt_tabledefinition_1 = CreateTableDefinition( ... columns={ ... "id": TableScalarColumnTypeDescriptor(ColumnType.TEXT), ... "udt_col": TableUDTColumnDescriptor( ... udt_name="my_udt", ... ), ... "set_udt_col": TableValuedColumnTypeDescriptor( ... column_type=TableValuedColumnType.SET, ... value_type=TableUDTColumnDescriptor( ... udt_name="my_udt", ... ), ... ), ... }, ... primary_key=TablePrimaryKeyDescriptor( ... partition_by=["id"], ... partition_sort={}, ... ) ... ) >>> >>> udt_tabledefinition_2 = CreateTableDefinition.coerce( ... { ... "columns": { ... "id": { ... "type": "text", ... }, ... "udt_col": { ... 
"type": "userDefined", ... "udtName": "my_udt", ... }, ... "set_udt_col": { ... "type": "set", ... "valueType": { ... "type": "userDefined", ... "udtName": "my_udt", ... }, ... }, ... }, ... "primaryKey": { ... "partitionBy": [ ... "id", ... ], ... "partitionSort": {}, ... }, ... }, ... ) >>> >>> # The three created objects are exactly identical: >>> udt_tabledefinition_2 == udt_tabledefinition_1 True >>> udt_tabledefinition_2 == udt_tabledefinition TrueStatic methods
def builder() ‑> CreateTableDefinition-
Expand source code
@staticmethod
def builder() -> CreateTableDefinition:
    """
    Create an "empty" builder to start constructing a table definition through
    the fluent interface. The returned object declares no columns and an empty
    primary key; both are to be filled in progressively with the various
    `add_*` methods.

    Since it describes a "table with no columns at all", the result of this
    method alone is not an acceptable table definition for running a table
    creation method on a Database.

    See the class docstring for a full example on using the fluent interface.

    Returns:
        a CreateTableDefinition formally describing a table with no columns.
    """
    bare_primary_key = TablePrimaryKeyDescriptor(
        partition_by=[],
        partition_sort={},
    )
    return CreateTableDefinition(columns={}, primary_key=bare_primary_key)
Since it describes a "table with no columns at all", the result of this method alone is not an acceptable table definition for running a table creation method on a Database.
See the class docstring for a full example on using the fluent interface.
Returns
a CreateTableDefinition formally describing a table with no columns.
def coerce(raw_input: CreateTableDefinition | dict[str, Any]) ‑> CreateTableDefinition-
Normalize the input, whether an object already or a plain dictionary of the right structure, into a CreateTableDefinition.
Instance variables
var columns : dict[str, TableColumnTypeDescriptor]-
A map from column names to their type definition object.
var primary_key : TablePrimaryKeyDescriptor-
A specification of the primary key for the table.
Methods
def add_column(self,
column_name: str,
column_type: str | ColumnType) ‑> CreateTableDefinition-
Expand source code
def add_column(
    self, column_name: str, column_type: str | ColumnType
) -> CreateTableDefinition:
    """
    Return a new table definition object with an added scalar-type column
    (i.e. not a list, set or other composite type), as part of the fluent
    interface for progressively building a complete table definition.

    This method is a pure alias of `add_scalar_column`.

    See the class docstring for a full example on using the fluent interface.

    Args:
        column_name: the name of the new column to add to the definition.
        column_type: a string, or a `ColumnType` value, defining
            the scalar type for the column.

    Returns:
        a CreateTableDefinition obtained by adding (or replacing) the desired
        column to this table definition.
    """
    # delegate directly to the canonical implementation
    return self.add_scalar_column(column_name, column_type)
This method is an alias for
add_scalar_column.See the class docstring for a full example on using the fluent interface.
Args
column_name- the name of the new column to add to the definition.
column_type- a string, or a
ColumnTypevalue, defining the scalar type for the column.
Returns
a CreateTableDefinition obtained by adding (or replacing) the desired column to this table definition.
def add_list_column(self,
column_name: str,
value_type: str | dict[Any, str] | ColumnType | TableColumnTypeDescriptor) ‑> CreateTableDefinition-
Expand source code
def add_list_column(
    self,
    column_name: str,
    value_type: str | dict[Any, str] | ColumnType | TableColumnTypeDescriptor,
) -> CreateTableDefinition:
    """
    Return a new table definition object with an added column of 'list' type.
    This method is for use within the fluent interface for progressively
    building a complete table definition.

    See the class docstring for a full example on using the fluent interface.

    Args:
        column_name: the name of the new column to add to the definition.
        value_type: the type of the individual items stored in the list.
            This is a `TableColumnTypeDescriptor`, but when creating the object,
            equivalent dictionaries, as well as strings such as "TEXT" or "UUID"
            or ColumnType entries, are also accepted.

    Returns:
        a CreateTableDefinition obtained by adding (or replacing) the desired
        column to this table definition.
    """
    # a new definition is returned; this object is not mutated
    return CreateTableDefinition(
        columns={
            **self.columns,
            **{
                column_name: TableValuedColumnTypeDescriptor(
                    column_type=TableValuedColumnType.LIST, value_type=value_type
                )
            },
        },
        primary_key=self.primary_key,
    )
See the class docstring for a full example on using the fluent interface.
Args
column_name- the name of the new column to add to the definition.
value_type- the type of the individual items stored in the list.
This is a
TableColumnTypeDescriptor, but when creating the object, equivalent dictionaries, as well as strings such as "TEXT" or "UUID" or ColumnType entries, are also accepted.
Returns
a CreateTableDefinition obtained by adding (or replacing) the desired column to this table definition.
def add_map_column(self,
column_name: str,
key_type: str | dict[Any, str] | ColumnType | TableColumnTypeDescriptor,
value_type: str | dict[Any, str] | ColumnType | TableColumnTypeDescriptor) ‑> CreateTableDefinition-
Expand source code
def add_map_column(
    self,
    column_name: str,
    key_type: str | dict[Any, str] | ColumnType | TableColumnTypeDescriptor,
    value_type: str | dict[Any, str] | ColumnType | TableColumnTypeDescriptor,
) -> CreateTableDefinition:
    """
    Return a new table definition object with an added column of 'map' type,
    as part of the fluent interface for progressively building a complete
    table definition.

    See the class docstring for a full example on using the fluent interface.

    Args:
        column_name: the name of the new column to add to the definition.
        key_type: the type of the individual keys in the map column.
            This is a `TableColumnTypeDescriptor`, but when creating the object,
            equivalent dictionaries, as well as strings such as "TEXT" or "UUID"
            or ColumnType entries, are also accepted. Using a column type not
            eligible to be a key will return a Data API error.
        value_type: the type of the individual items stored in the column.
            This is a `TableColumnTypeDescriptor`, but when creating the object,
            equivalent dictionaries, as well as strings such as "TEXT" or "UUID"
            or ColumnType entries, are also accepted.

    Returns:
        a CreateTableDefinition obtained by adding (or replacing) the desired
        column to this table definition.
    """
    # copy-and-extend the column map; this definition itself is left untouched
    enriched_columns = dict(self.columns)
    enriched_columns[column_name] = TableKeyValuedColumnTypeDescriptor(
        column_type=TableKeyValuedColumnType.MAP,
        key_type=key_type,
        value_type=value_type,
    )
    return CreateTableDefinition(
        columns=enriched_columns,
        primary_key=self.primary_key,
    )
See the class docstring for a full example on using the fluent interface.
Args
column_name- the name of the new column to add to the definition.
key_type- the type of the individual keys in the map column.
This is a
TableColumnTypeDescriptor, but when creating the object, equivalent dictionaries, as well as strings such as "TEXT" or "UUID" or ColumnType entries, are also accepted. Using a column type not eligible to be a key will return a Data API error. value_type- the type of the individual items stored in the column.
This is a
TableColumnTypeDescriptor, but when creating the object, equivalent dictionaries, as well as strings such as "TEXT" or "UUID" or ColumnType entries, are also accepted.
Returns
a CreateTableDefinition obtained by adding (or replacing) the desired column to this table definition.
def add_partition_by(self, partition_columns: list[str] | str) ‑> CreateTableDefinition-
Expand source code
def add_partition_by(
    self, partition_columns: list[str] | str
) -> CreateTableDefinition:
    """
    Return a new table definition with extra `partition_by` columns.

    Part of the fluent interface for progressively building a complete
    table definition; see the class docstring for a full usage example.

    Successive calls append the requested columns at the end of the
    pre-existing `partition_by` list, so these two patterns are equivalent:
        (1) X.add_partition_by(["col1", "col2"])
        (2) X.add_partition_by(["col1"]).add_partition_by("col2")
    No deduplication is applied to the overall result: the caller should
    take care of not supplying the same column name more than once.

    Args:
        partition_columns: a list of column names (strings) to be added to
            the full table partition key. A single string (not a list) is
            also accepted.

    Returns:
        a CreateTableDefinition obtained by enriching the `partition_by`
        of this table definition as requested.
    """
    if isinstance(partition_columns, list):
        columns_to_append = partition_columns
    else:
        columns_to_append = [partition_columns]
    extended_primary_key = TablePrimaryKeyDescriptor(
        partition_by=[*self.primary_key.partition_by, *columns_to_append],
        partition_sort=self.primary_key.partition_sort,
    )
    return CreateTableDefinition(
        columns=self.columns,
        primary_key=extended_primary_key,
    )
partition_bycolumns. This method is for use within the fluent interface for progressively building a complete table definition.See the class docstring for a full example on using the fluent interface.
Successive calls append the requested columns at the end of the pre-existing
partition_bylist. In other words, these two patterns are equivalent: (1) X.add_partition_by(["col1", "col2"]) (2) X.add_partition_by(["col1"]).add_partition_by("col2")Note that no deduplication is applied to the overall result: the caller should take care of not supplying the same column name more than once.
Args
partition_columns- a list of column names (strings) to be added to the full table partition key. A single string (not a list) is also accepted.
Returns
a CreateTableDefinition obtained by enriching the
partition_byof this table definition as requested. def add_partition_sort(self, partition_sort: dict[str, int]) ‑> CreateTableDefinition-
Expand source code
def add_partition_sort(
    self, partition_sort: dict[str, int]
) -> CreateTableDefinition:
    """
    Return a new table definition object with one or more added
    `partition_sort` column specifications. This method is for use within
    the fluent interface for progressively building a complete
    table definition.

    See the class docstring for a full example on using the fluent interface.

    Successive calls append (or replace) the requested columns at the end
    of the pre-existing `partition_sort` dictionary. In other words, these
    two patterns are equivalent:
        (1) X.add_partition_sort({"c1": 1, "c2": -1})
        (2) X.add_partition_sort({"c1": 1}).add_partition_sort({"c2": -1})

    Args:
        partition_sort: a dictionary mapping column names to their sort mode
            (ascending/descending, i.e. 1/-1. See also
            `astrapy.constants.SortMode`).

    Returns:
        a CreateTableDefinition obtained by enriching the `partition_sort`
        of this table definition as requested.
    """
    return CreateTableDefinition(
        columns=self.columns,
        primary_key=TablePrimaryKeyDescriptor(
            partition_by=self.primary_key.partition_by,
            # later entries win on key collision, implementing the
            # "append or replace" semantics documented above
            partition_sort={**self.primary_key.partition_sort, **partition_sort},
        ),
    )
partition_sortcolumn specifications. This method is for use within the fluent interface for progressively building a complete table definition.See the class docstring for a full example on using the fluent interface.
Successive calls append (or replace) the requested columns at the end of the pre-existing
partition_sortdictionary. In other words, these two patterns are equivalent: (1) X.add_partition_sort({"c1": 1, "c2": -1}) (2) X.add_partition_sort({"c1": 1}).add_partition_sort({"c2": -1})Args
partition_sort- a dictionary mapping column names to their sort mode
(ascending/descending, i.e. 1/-1. See also
SortMode).Returns
a CreateTableDefinition obtained by enriching the
partition_sortof this table definition as requested. def add_scalar_column(self,
column_name: str,
column_type: str | ColumnType) ‑> CreateTableDefinition-
Expand source code
def add_scalar_column(
    self, column_name: str, column_type: str | ColumnType
) -> CreateTableDefinition:
    """
    Return a new table definition with an added column of a scalar type
    (i.e. not a list, set or other composite type).

    Part of the fluent interface for progressively building a complete
    table definition; see the class docstring for a full usage example.

    Args:
        column_name: the name of the new column to add to the definition.
        column_type: a string, or a `ColumnType` value, defining the
            scalar type for the column.

    Returns:
        a CreateTableDefinition obtained by adding (or replacing) the
        desired column to this table definition.
    """
    scalar_descriptor = TableScalarColumnTypeDescriptor(
        column_type=ColumnType.coerce(column_type)
    )
    return CreateTableDefinition(
        columns={**self.columns, column_name: scalar_descriptor},
        primary_key=self.primary_key,
    )
See the class docstring for a full example on using the fluent interface.
Args
column_name- the name of the new column to add to the definition.
column_type- a string, or a
ColumnTypevalue, defining the scalar type for the column.
Returns
a CreateTableDefinition obtained by adding (or replacing) the desired column to this table definition.
def add_set_column(self,
column_name: str,
value_type: str | dict[Any, str] | ColumnType | TableColumnTypeDescriptor) ‑> CreateTableDefinition-
Expand source code
def add_set_column(
    self,
    column_name: str,
    value_type: str | dict[Any, str] | ColumnType | TableColumnTypeDescriptor,
) -> CreateTableDefinition:
    """
    Return a new table definition with a 'set' column added (or replaced).

    Part of the fluent interface for progressively building a complete
    table definition; see the class docstring for a full usage example.

    Args:
        column_name: the name of the new column to add to the definition.
        value_type: the type of the individual items stored in the set.
            This is a `TableColumnTypeDescriptor`, but when creating the
            object, equivalent dictionaries, strings such as "TEXT" or
            "UUID", and ColumnType entries are also accepted.

    Returns:
        a CreateTableDefinition obtained by adding (or replacing) the
        desired column to this table definition.
    """
    set_descriptor = TableValuedColumnTypeDescriptor(
        column_type=TableValuedColumnType.SET, value_type=value_type
    )
    return CreateTableDefinition(
        columns={**self.columns, column_name: set_descriptor},
        primary_key=self.primary_key,
    )
See the class docstring for a full example on using the fluent interface.
Args
column_name- the name of the new column to add to the definition.
value_type- the type of the individual items stored in the set.
This is a
TableColumnTypeDescriptor, but when creating the object, equivalent dictionaries, as well as strings such as "TEXT" or "UUID" or ColumnType entries, are also accepted.
Returns
a CreateTableDefinition obtained by adding (or replacing) the desired column to this table definition.
def add_userdefinedtype_column(self, column_name: str, udt_name: str) ‑> CreateTableDefinition-
Expand source code
def add_userdefinedtype_column(
    self,
    column_name: str,
    udt_name: str,
) -> CreateTableDefinition:
    """
    Return a new table definition with an added column of 'user defined'
    type (UDT).

    Part of the fluent interface for progressively building a complete
    table definition; see the class docstring for a full usage example.

    Args:
        column_name: the name of the new column to add to the definition.
        udt_name: the name of the user-defined type for this column.

    Returns:
        a CreateTableDefinition obtained by adding (or replacing) the
        desired column to this table definition.
    """
    udt_descriptor = TableUDTColumnDescriptor(
        column_type=TableUDTColumnType.USERDEFINED,
        udt_name=udt_name,
    )
    return CreateTableDefinition(
        columns={**self.columns, column_name: udt_descriptor},
        primary_key=self.primary_key,
    )
See the class docstring for a full example on using the fluent interface.
Args
column_name- the name of the new column to add to the definition.
udt_name- the name of the user-defined type for this column.
Returns
a CreateTableDefinition obtained by adding (or replacing) the desired column to this table definition.
def add_vector_column(self,
column_name: str,
*,
dimension: int | None = None,
service: VectorServiceOptions | dict[str, Any] | None = None) ‑> CreateTableDefinition-
Expand source code
def add_vector_column(
    self,
    column_name: str,
    *,
    dimension: int | None = None,
    service: VectorServiceOptions | dict[str, Any] | None = None,
) -> CreateTableDefinition:
    """
    Return a new table definition with a 'vector' column added (or
    replaced).

    Part of the fluent interface for progressively building a complete
    table definition; see the class docstring for a full usage example.

    Args:
        column_name: the name of the new column to add to the definition.
        dimension: the dimensionality of the vector, i.e. the number of
            components each vector in this column will have. If a `service`
            parameter is supplied and the vectorize model allows for it,
            the dimension may be left unspecified to have the API set a
            default value. The Data API will raise an error if a table
            creation is attempted with a vector column for which neither
            a service nor the dimension are given.
        service: a `VectorServiceOptions` object, or an equivalent plain
            dictionary, defining the server-side embedding service
            associated to the column, if desired.

    Returns:
        a CreateTableDefinition obtained by adding (or replacing) the
        desired column to this table definition.
    """
    vector_descriptor = TableVectorColumnTypeDescriptor(
        column_type=TableVectorColumnType.VECTOR,
        dimension=dimension,
        service=VectorServiceOptions.coerce(service),
    )
    return CreateTableDefinition(
        columns={**self.columns, column_name: vector_descriptor},
        primary_key=self.primary_key,
    )
See the class docstring for a full example on using the fluent interface.
Args
column_name- the name of the new column to add to the definition.
dimension- the dimensionality of the vector, i.e. the number of components
each vector in this column will have. If a
serviceparameter is supplied and the vectorize model allows for it, the dimension may be left unspecified to have the API set a default value. The Data API will raise an error if a table creation is attempted with a vector column for which neither a service nor the dimension are given. service- a
VectorServiceOptionsobject, or an equivalent plain dictionary, defining the server-side embedding service associated to the column, if desired.
Returns
a CreateTableDefinition obtained by adding (or replacing) the desired column to this table definition.
def as_dict(self) ‑> dict[str, typing.Any]-
Expand source code
def as_dict(self) -> dict[str, Any]:
    """Recast this object into a dictionary."""
    candidate_entries = {
        "columns": {
            col_name: col_desc.as_spec()
            for col_name, col_desc in self.columns.items()
        },
        "primaryKey": self.primary_key.as_dict(),
    }
    # drop None-valued entries from the resulting payload
    return {key: value for key, value in candidate_entries.items() if value is not None}
def build(self) ‑> CreateTableDefinition-
Expand source code
def build(self) -> CreateTableDefinition:
    """
    Finalize the fluent (builder) interface and return the definition.

    Calling this method finalizes the definition built so far, making it
    into a table definition ready for use in e.g. table creation. Note
    that this step may be automatically invoked by the receiving methods:
    however it is a good practice - and also adds to the readability of
    the code - to call it explicitly.

    See the class docstring for a full example on using the fluent
    interface.

    Returns:
        a CreateTableDefinition obtained by finalizing the definition
        being built so far.
    """
    return self
Note that this step may be automatically invoked by the receiving methods: however it is a good practice - and also adds to the readability of the code - to call it explicitly.
See the class docstring for a full example on using the fluent interface.
Returns
a CreateTableDefinition obtained by finalizing the definition being built so far.
class CreateTypeDefinition (*, fields: dict[str, TableColumnTypeDescriptor | dict[str, Any] | str])-
Expand source code
@dataclass
class CreateTypeDefinition:
    """
    A structure expressing the definition of a user-defined type (UDT) to be
    created through the Data API. This object is passed as the `definition`
    parameter to the database `create_type` method.

    See the Data API specifications for detailed specification and allowed values.

    Instances of this object can be created in three ways: using a fluent
    interface, passing a fully-formed definition to the class constructor,
    or coercing an appropriately-shaped plain dictionary into this class.

    Attributes:
        fields: a map from field names to their type definition object. This follows
            the same structure as the `columns` attribute of `CreateTableDefinition`.

    Example:
        >>> from astrapy.info import CreateTypeDefinition
        >>> from astrapy.info import ColumnType, TableScalarColumnTypeDescriptor
        >>>
        >>> type_definition_0 = (
        ...     CreateTypeDefinition.builder()
        ...     .add_field("tagline", ColumnType.TEXT)
        ...     .add_field("score", ColumnType.INT)
        ...     .add_field("height", "float")  # plain strings accepted for field types
        ...     .build()
        ... )
        >>>
        >>> type_definition_1 = CreateTypeDefinition(fields={
        ...     "tagline": TableScalarColumnTypeDescriptor(ColumnType.TEXT),
        ...     "score": TableScalarColumnTypeDescriptor(ColumnType.INT),
        ...     "height": TableScalarColumnTypeDescriptor(ColumnType.FLOAT),
        ... })
        >>>
        >>> fields_dict_2 = {
        ...     "fields": {
        ...         "tagline": "text",
        ...         "score": "int",
        ...         "height": "float",
        ...     },
        ... }
        >>> type_definition_2 = CreateTypeDefinition.coerce(fields_dict_2)
        >>>
        >>> fields_dict_3 = {
        ...     "fields": {
        ...         "tagline": "text",
        ...         "score": {"type": "int"},
        ...         "height": TableScalarColumnTypeDescriptor(ColumnType.FLOAT),
        ...     },
        ... }
        >>> type_definition_3_mixed = CreateTypeDefinition.coerce(fields_dict_3)
        >>> type_definition_0 == type_definition_1
        True
        >>> type_definition_1 == type_definition_2
        True
        >>> type_definition_2 == type_definition_3_mixed
        True
    """

    fields: dict[str, TableColumnTypeDescriptor]

    def __init__(
        self,
        *,
        fields: dict[str, TableColumnTypeDescriptor | dict[str, Any] | str],
    ) -> None:
        # normalize each field spec (descriptor, dict or string) into
        # a full TableColumnTypeDescriptor
        self.fields = {
            fld_n: TableColumnTypeDescriptor.coerce(fld_v)
            for fld_n, fld_v in fields.items()
        }

    def __repr__(self) -> str:
        fld_desc = f"fields=[{','.join(sorted(self.fields.keys()))}]"
        return f"{self.__class__.__name__}({fld_desc})"

    def as_dict(self) -> dict[str, Any]:
        """Recast this object into a dictionary."""
        return {
            "fields": {col_n: col_v.as_dict() for col_n, col_v in self.fields.items()},
        }

    @classmethod
    def _from_dict(cls, raw_dict: dict[str, Any]) -> CreateTypeDefinition:
        """
        Create an instance of CreateTypeDefinition from a dictionary
        such as one from the Data API.
        """
        _warn_residual_keys(cls, raw_dict, {"fields"})
        return CreateTypeDefinition(
            fields={
                fld_n: TableColumnTypeDescriptor.coerce(fld_v)
                for fld_n, fld_v in raw_dict["fields"].items()
            },
        )

    @classmethod
    def coerce(
        cls, raw_input: CreateTypeDefinition | dict[str, Any]
    ) -> CreateTypeDefinition:
        """
        Normalize the input, whether an object already or a plain
        dictionary of the right structure, into a CreateTypeDefinition.
        """
        if isinstance(raw_input, CreateTypeDefinition):
            return raw_input
        else:
            return cls._from_dict(raw_input)

    @staticmethod
    def builder() -> CreateTypeDefinition:
        """
        Create an "empty" builder for constructing a type definition
        through a fluent interface. The resulting object has no fields
        yet: those are to be added progressively with the `add_field`
        method. Being a "type without fields", the type definition
        returned by this method cannot be directly used to create a type.

        See the class docstring for a full example on using the fluent
        interface.

        Returns:
            a CreateTypeDefinition formally describing a type without fields.
        """
        return CreateTypeDefinition(fields={})

    def add_field(
        self, field_name: str, field_type: str | ColumnType
    ) -> CreateTypeDefinition:
        """
        Return a new type definition object with an added field of a
        scalar type (i.e. not a list, set or other composite type). This
        method is for use within the fluent interface for progressively
        building the final type definition.

        See the class docstring for a full example on using the fluent
        interface.

        Args:
            field_name: the name of the new field to add to the type.
            field_type: a string, or a `ColumnType` value, defining
                the scalar type for the field.

        Returns:
            a CreateTypeDefinition obtained by adding (or replacing) the
            desired field to this type definition.
        """
        return CreateTypeDefinition(
            fields={
                **self.fields,
                **{
                    field_name: TableScalarColumnTypeDescriptor(
                        column_type=ColumnType.coerce(field_type)
                    )
                },
            },
        )

    def build(self) -> CreateTypeDefinition:
        """
        The final step in the fluent (builder) interface. Calling this
        method finalizes the definition that has been built so far and
        makes it into a type definition ready for use e.g. with the
        database's `create_type` method.

        See the class docstring for a full example on using the fluent
        interface.

        Returns:
            a CreateTypeDefinition obtained by finalizing the definition
            being built so far.
        """
        return self
definitionparameter to the databasecreate_typemethod.See the Data API specifications for detailed specification and allowed values.
Instances of this object can be created in three ways: using a fluent interface, passing a fully-formed definition to the class constructor, or coercing an appropriately-shaped plain dictionary into this class.
Attributes
fields- a map from field names to their type definition object. This follows
the same structure as the
columnsattribute ofCreateTableDefinition.
Example
>>> from astrapy.info import CreateTypeDefinition >>> from astrapy.info import ColumnType, TableScalarColumnTypeDescriptor >>> >>> type_definition_0 = ( ... CreateTypeDefinition.builder() ... .add_field("tagline", ColumnType.TEXT) ... .add_field("score", ColumnType.INT) ... .add_field("height", "float") # plain strings accepted for field types ... .build() ... ) >>> >>> type_definition_1 = CreateTypeDefinition(fields={ ... "tagline": TableScalarColumnTypeDescriptor(ColumnType.TEXT), ... "score": TableScalarColumnTypeDescriptor(ColumnType.INT), ... "height": TableScalarColumnTypeDescriptor(ColumnType.FLOAT), ... }) >>> >>> fields_dict_2 = { ... "fields": { ... "tagline": "text", ... "score": "int", ... "height": "float", ... }, ... } >>> type_definition_2 = CreateTypeDefinition.coerce(fields_dict_2) >>> >>> fields_dict_3 = { ... "fields": { ... "tagline": "text", ... "score": {"type": "int"}, ... "height": TableScalarColumnTypeDescriptor(ColumnType.FLOAT), ... }, ... } >>> type_definition_3_mixed = CreateTypeDefinition.coerce(fields_dict_3) >>> type_definition_0 == type_definition_1 True >>> type_definition_1 == type_definition_2 True >>> type_definition_2 == type_definition_3_mixed TrueStatic methods
def builder() ‑> CreateTypeDefinition-
Expand source code
@staticmethod
def builder() -> CreateTypeDefinition:
    """
    Create an "empty" builder for constructing a type definition through
    a fluent interface.

    The returned object has no fields yet: those are to be added
    progressively with the `add_field` method. Being a "type without
    fields", the type definition returned by this method cannot be
    directly used to create a type.

    See the class docstring for a full example on using the fluent
    interface.

    Returns:
        a CreateTypeDefinition formally describing a type without fields.
    """
    return CreateTypeDefinition(fields={})
add_fieldmethod.Being a "type without fields", the type definition returned by this method cannot be directly used to create a type.
See the class docstring for a full example on using the fluent interface.
Returns
a CreateTypeDefinition formally describing a type without fields.
def coerce(raw_input: CreateTypeDefinition | dict[str, Any]) ‑> CreateTypeDefinition-
Normalize the input, whether an object already or a plain dictionary of the right structure, into a CreateTypeDefinition.
Instance variables
var fields : dict[str, TableColumnTypeDescriptor]-
The type of the None singleton.
Methods
def add_field(self,
field_name: str,
field_type: str | ColumnType) ‑> CreateTypeDefinition-
Expand source code
def add_field(
    self, field_name: str, field_type: str | ColumnType
) -> CreateTypeDefinition:
    """
    Return a new type definition with an added field of a scalar type
    (i.e. not a list, set or other composite type).

    Part of the fluent interface for progressively building the final
    type definition; see the class docstring for a full usage example.

    Args:
        field_name: the name of the new field to add to the type.
        field_type: a string, or a `ColumnType` value, defining the
            scalar type for the field.

    Returns:
        a CreateTypeDefinition obtained by adding (or replacing) the
        desired field to this type definition.
    """
    scalar_descriptor = TableScalarColumnTypeDescriptor(
        column_type=ColumnType.coerce(field_type)
    )
    return CreateTypeDefinition(
        fields={**self.fields, field_name: scalar_descriptor},
    )
See the class docstring for a full example on using the fluent interface.
Args
field_name- the name of the new field to add to the type.
field_type- a string, or a
ColumnTypevalue, defining the scalar type for the field.
Returns
a CreateTypeDefinition obtained by adding (or replacing) the desired field to this type definition.
def as_dict(self) ‑> dict[str, typing.Any]-
Expand source code
def as_dict(self) -> dict[str, Any]:
    """Recast this object into a dictionary."""
    serialized_fields = {
        field_name: field_descriptor.as_dict()
        for field_name, field_descriptor in self.fields.items()
    }
    return {"fields": serialized_fields}
def build(self) ‑> CreateTypeDefinition-
Expand source code
def build(self) -> CreateTypeDefinition:
    """
    Finalize the fluent (builder) interface and return the definition.

    Calling this method finalizes the definition built so far, making it
    into a type definition ready for use e.g. with the database's
    `create_type` method.

    See the class docstring for a full example on using the fluent
    interface.

    Returns:
        a CreateTypeDefinition obtained by finalizing the definition
        being built so far.
    """
    return self
create_typemethod.See the class docstring for a full example on using the fluent interface.
Returns
a CreateTypeDefinition obtained by finalizing the definition being built so far.
class EmbeddingAPIModelSupport (status: str, message: str | None)-
Expand source code
@dataclass
class EmbeddingAPIModelSupport:
    """
    A representation of the API support status for an embedding model.

    Attributes:
        status: a string describing the support status.
        message: an optional string message alongside the status.
    """

    status: str
    message: str | None

    def __repr__(self) -> str:
        return f"EmbeddingAPIModelSupport({self.status})"

    def as_dict(self) -> dict[str, Any]:
        """Recast this object into a dictionary."""
        full_mapping = {
            "status": self.status,
            "message": self.message,
        }
        # None-valued entries are omitted from the payload
        return {key: value for key, value in full_mapping.items() if value is not None}

    @staticmethod
    def _from_dict(raw_dict: dict[str, Any]) -> EmbeddingAPIModelSupport:
        """
        Create an instance of EmbeddingAPIModelSupport from a dictionary
        such as one from the Data API.
        """
        residual_keys = raw_dict.keys() - {"status", "message"}
        if residual_keys:
            warnings.warn(
                "Unexpected key(s) encountered parsing a dictionary into "
                f"an `EmbeddingAPIModelSupport`: '{','.join(sorted(residual_keys))}'"
            )
        # a missing/empty status defaults to "SUPPORTED"
        return EmbeddingAPIModelSupport(
            status=raw_dict.get("status") or "SUPPORTED",
            message=raw_dict.get("message"),
        )
Attributes
status- a string describing the support status.
message- an optional string message alongside the status.
Instance variables
var message : str | None-
The type of the None singleton.
var status : str-
The type of the None singleton.
Methods
def as_dict(self) ‑> dict[str, typing.Any]-
Expand source code
def as_dict(self) -> dict[str, Any]:
    """Recast this object into a dictionary."""
    full_mapping = {
        "status": self.status,
        "message": self.message,
    }
    # None-valued entries are omitted from the payload
    return {key: value for key, value in full_mapping.items() if value is not None}
class EmbeddingProvider (display_name: str | None,
models: list[EmbeddingProviderModel],
parameters: list[EmbeddingProviderParameter],
supported_authentication: dict[str, EmbeddingProviderAuthentication],
url: str | None)-
Expand source code
@dataclass
class EmbeddingProvider:
    """
    A representation of an embedding provider, as returned by the
    'findEmbeddingProviders' Data API endpoint.

    Attributes:
        display_name: a version of the provider name for display and pretty
            printing. Not to be used when issuing vectorize API requests
            (for the latter, it is the key in the providers dictionary that
            is required).
        models: a list of `EmbeddingProviderModel` objects pertaining to
            the provider.
        parameters: a list of `EmbeddingProviderParameter` objects common
            to all models for this provider.
        supported_authentication: a dictionary of the authentication modes
            for this provider. Note that disabled modes may still appear
            in this map, albeit with the `enabled` property set to False.
        url: a string template for the URL used by the Data API when
            issuing the request toward the embedding provider. This is of
            no direct concern to the Data API user.
    """

    display_name: str | None
    models: list[EmbeddingProviderModel]
    parameters: list[EmbeddingProviderParameter]
    supported_authentication: dict[str, EmbeddingProviderAuthentication]
    url: str | None

    def __repr__(self) -> str:
        return f"EmbeddingProvider(display_name='{self.display_name}', models={self.models})"

    def as_dict(self) -> dict[str, Any]:
        """Recast this object into a dictionary."""
        serialized_auth = {
            sa_name: sa_value.as_dict()
            for sa_name, sa_value in self.supported_authentication.items()
        }
        return {
            "displayName": self.display_name,
            "models": [model.as_dict() for model in self.models],
            "parameters": [parameter.as_dict() for parameter in self.parameters],
            "supportedAuthentication": serialized_auth,
            "url": self.url,
        }

    @staticmethod
    def _from_dict(raw_dict: dict[str, Any]) -> EmbeddingProvider:
        """
        Create an instance of EmbeddingProvider from a dictionary
        such as one from the Data API.
        """
        expected_keys = {
            "displayName",
            "models",
            "parameters",
            "supportedAuthentication",
            "url",
        }
        residual_keys = raw_dict.keys() - expected_keys
        if residual_keys:
            warnings.warn(
                "Unexpected key(s) encountered parsing a dictionary into "
                f"an `EmbeddingProvider`: '{','.join(sorted(residual_keys))}'"
            )
        return EmbeddingProvider(
            display_name=raw_dict["displayName"],
            models=[
                EmbeddingProviderModel._from_dict(model_dict)
                for model_dict in raw_dict["models"]
            ],
            parameters=[
                EmbeddingProviderParameter._from_dict(param_dict)
                for param_dict in raw_dict["parameters"]
            ],
            supported_authentication={
                sa_name: EmbeddingProviderAuthentication._from_dict(sa_dict)
                for sa_name, sa_dict in raw_dict["supportedAuthentication"].items()
            },
            url=raw_dict["url"],
        )
Attributes
display_name- a version of the provider name for display and pretty printing. Not to be used when issuing vectorize API requests (for the latter, it is the key in the providers dictionary that is required).
models- a list of
EmbeddingProviderModelobjects pertaining to the provider. parameters- a list of
EmbeddingProviderParameterobjects common to all models for this provider. supported_authentication- a dictionary of the authentication modes for
this provider. Note that disabled modes may still appear in this map,
albeit with the
enabledproperty set to False. url- a string template for the URL used by the Data API when issuing the request toward the embedding provider. This is of no direct concern to the Data API user.
Instance variables
var display_name : str | None-
The type of the None singleton.
var models : list[EmbeddingProviderModel]-
The type of the None singleton.
var parameters : list[EmbeddingProviderParameter]-
The type of the None singleton.
var supported_authentication : dict[str, EmbeddingProviderAuthentication]-
The type of the None singleton.
var url : str | None-
The type of the None singleton.
Methods
def as_dict(self) ‑> dict[str, typing.Any]-
Expand source code
def as_dict(self) -> dict[str, Any]:
    """Recast this object into a dictionary."""
    serialized_auth = {
        sa_name: sa_value.as_dict()
        for sa_name, sa_value in self.supported_authentication.items()
    }
    return {
        "displayName": self.display_name,
        "models": [model.as_dict() for model in self.models],
        "parameters": [parameter.as_dict() for parameter in self.parameters],
        "supportedAuthentication": serialized_auth,
        "url": self.url,
    }
class EmbeddingProviderAuthentication (enabled: bool,
tokens: list[EmbeddingProviderToken])-
Expand source code
@dataclass
class EmbeddingProviderAuthentication:
    """
    An authentication mode available for using an embedding model, mirroring
    the corresponding part of the 'findEmbeddingProviders' Data API response
    (namely the "supportedAuthentication" section).

    Attributes:
        enabled: whether this authentication mode is available for a given model.
        tokens: a list of `EmbeddingProviderToken` objects, detailing
            the secrets required for the authentication mode.
    """

    enabled: bool
    tokens: list[EmbeddingProviderToken]

    def __repr__(self) -> str:
        token_descs = ",".join(str(token) for token in self.tokens)
        return (
            f"EmbeddingProviderAuthentication(enabled={self.enabled}, "
            f"tokens={token_descs})"
        )

    def as_dict(self) -> dict[str, Any]:
        """Recast this object into a dictionary."""
        return {
            "enabled": self.enabled,
            "tokens": [tk.as_dict() for tk in self.tokens],
        }

    @staticmethod
    def _from_dict(raw_dict: dict[str, Any]) -> EmbeddingProviderAuthentication:
        """
        Create an instance of EmbeddingProviderAuthentication from a dictionary
        such as one from the Data API.
        """
        unknown_keys = raw_dict.keys() - {"enabled", "tokens"}
        if unknown_keys:
            warnings.warn(
                "Unexpected key(s) encountered parsing a dictionary into "
                f"an `EmbeddingProviderAuthentication`: '{','.join(sorted(unknown_keys))}'"
            )
        parsed_tokens = [
            EmbeddingProviderToken._from_dict(token_dict)
            for token_dict in raw_dict["tokens"]
        ]
        return EmbeddingProviderAuthentication(
            enabled=raw_dict["enabled"],
            tokens=parsed_tokens,
        )
Attributes
enabled- whether this authentication mode is available for a given model.
tokens - a list of EmbeddingProviderToken objects, detailing the secrets required for the authentication mode.
Instance variables
var enabled : bool-
The type of the None singleton.
var tokens : list[EmbeddingProviderToken]-
The type of the None singleton.
Methods
def as_dict(self) ‑> dict[str, typing.Any]-
Expand source code
def as_dict(self) -> dict[str, Any]: """Recast this object into a dictionary.""" return { "enabled": self.enabled, "tokens": [token.as_dict() for token in self.tokens], }Recast this object into a dictionary.
class EmbeddingProviderModel (name: str,
parameters: list[EmbeddingProviderParameter],
vector_dimension: int | None,
api_model_support: EmbeddingAPIModelSupport)-
Expand source code
@dataclass
class EmbeddingProviderModel:
    """
    A representation of an embedding model as returned by the
    'findEmbeddingProviders' Data API endpoint.

    Attributes:
        name: the model name as must be passed when issuing
            vectorize operations to the API.
        parameters: a list of the `EmbeddingProviderParameter` objects
            the model admits.
        vector_dimension: an integer for the dimensionality of the embedding
            model. If this is None, the dimension can assume multiple values
            as specified by a corresponding parameter listed with the model.
        api_model_support: the status of API support for the model,
            in the form of an EmbeddingAPIModelSupport object.
    """

    name: str
    parameters: list[EmbeddingProviderParameter]
    vector_dimension: int | None
    api_model_support: EmbeddingAPIModelSupport

    def __repr__(self) -> str:
        return f"EmbeddingProviderModel(name='{self.name}')"

    def as_dict(self) -> dict[str, Any]:
        """Recast this object into a dictionary."""
        serialized_parameters = [prm.as_dict() for prm in self.parameters]
        return {
            "name": self.name,
            "parameters": serialized_parameters,
            "vectorDimension": self.vector_dimension,
            "apiModelSupport": self.api_model_support.as_dict(),
        }

    @staticmethod
    def _from_dict(raw_dict: dict[str, Any]) -> EmbeddingProviderModel:
        """
        Create an instance of EmbeddingProviderModel from a dictionary
        such as one from the Data API.
        """
        known_keys = {"name", "parameters", "vectorDimension", "apiModelSupport"}
        extra_keys = raw_dict.keys() - known_keys
        if extra_keys:
            warnings.warn(
                "Unexpected key(s) encountered parsing a dictionary into "
                f"an `EmbeddingProviderModel`: '{','.join(sorted(extra_keys))}'"
            )
        return EmbeddingProviderModel(
            name=raw_dict["name"],
            parameters=[
                EmbeddingProviderParameter._from_dict(param_dict)
                for param_dict in raw_dict["parameters"]
            ],
            vector_dimension=raw_dict["vectorDimension"],
            # a missing/None apiModelSupport falls back to an empty dict
            api_model_support=EmbeddingAPIModelSupport._from_dict(
                raw_dict.get("apiModelSupport") or {},
            ),
        )
Attributes
name- the model name as must be passed when issuing vectorize operations to the API.
parameters - a list of the EmbeddingProviderParameter objects the model admits.
vector_dimension - an integer for the dimensionality of the embedding model. If this is None, the dimension can assume multiple values as specified by a corresponding parameter listed with the model.
api_model_support- the status of API support for the model, in the form of an EmbeddingAPIModelSupport object.
Instance variables
var api_model_support : EmbeddingAPIModelSupport-
The type of the None singleton.
var name : str-
The type of the None singleton.
var parameters : list[EmbeddingProviderParameter]-
The type of the None singleton.
var vector_dimension : int | None-
The type of the None singleton.
Methods
def as_dict(self) ‑> dict[str, typing.Any]-
Expand source code
def as_dict(self) -> dict[str, Any]: """Recast this object into a dictionary.""" return { "name": self.name, "parameters": [parameter.as_dict() for parameter in self.parameters], "vectorDimension": self.vector_dimension, "apiModelSupport": self.api_model_support.as_dict(), }Recast this object into a dictionary.
class EmbeddingProviderParameter (default_value: Any,
display_name: str | None,
help: str | None,
hint: str | None,
name: str,
required: bool,
parameter_type: str,
validation: dict[str, Any])-
Expand source code
@dataclass
class EmbeddingProviderParameter:
    """
    A representation of a parameter as returned by the
    'findEmbeddingProviders' Data API endpoint.

    Attributes:
        default_value: the default value for the parameter.
        display_name: a display name for the parameter, if provided by the API.
        help: a textual description of the parameter.
        hint: a hint string for the parameter, if provided by the API.
        name: the name to use when passing the parameter for vectorize operations.
        required: whether the parameter is required or not.
        parameter_type: a textual description of the data type for the parameter.
        validation: a dictionary describing a parameter-specific validation policy.
    """

    default_value: Any
    display_name: str | None
    help: str | None
    hint: str | None
    name: str
    required: bool
    parameter_type: str
    validation: dict[str, Any]

    def __repr__(self) -> str:
        return f"EmbeddingProviderParameter(name='{self.name}')"

    def as_dict(self) -> dict[str, Any]:
        """Recast this object into a dictionary."""
        full_payload = {
            "defaultValue": self.default_value,
            "displayName": self.display_name,
            "help": self.help,
            "hint": self.hint,
            "name": self.name,
            "required": self.required,
            "type": self.parameter_type,
            "validation": self.validation,
        }
        # None-valued entries are omitted from the serialized form
        return {key: val for key, val in full_payload.items() if val is not None}

    @staticmethod
    def _from_dict(raw_dict: dict[str, Any]) -> EmbeddingProviderParameter:
        """
        Create an instance of EmbeddingProviderParameter from a dictionary
        such as one from the Data API.
        """
        known_keys = {
            "defaultValue",
            "displayName",
            "help",
            "hint",
            "name",
            "required",
            "type",
            "validation",
        }
        extra_keys = raw_dict.keys() - known_keys
        if extra_keys:
            warnings.warn(
                "Unexpected key(s) encountered parsing a dictionary into "
                f"an `EmbeddingProviderParameter`: '{','.join(sorted(extra_keys))}'"
            )
        return EmbeddingProviderParameter(
            default_value=raw_dict.get("defaultValue"),
            display_name=raw_dict.get("displayName"),
            help=raw_dict.get("help"),
            hint=raw_dict.get("hint"),
            name=raw_dict["name"],
            required=raw_dict["required"],
            parameter_type=raw_dict["type"],
            validation=raw_dict["validation"],
        )
Attributes
default_value- the default value for the parameter.
help- a textual description of the parameter.
name- the name to use when passing the parameter for vectorize operations.
required- whether the parameter is required or not.
parameter_type- a textual description of the data type for the parameter.
validation- a dictionary describing a parameter-specific validation policy.
Instance variables
var default_value : Any-
The type of the None singleton.
var display_name : str | None-
The type of the None singleton.
var help : str | None-
The type of the None singleton.
var hint : str | None-
The type of the None singleton.
var name : str-
The type of the None singleton.
var parameter_type : str-
The type of the None singleton.
var required : bool-
The type of the None singleton.
var validation : dict[str, typing.Any]-
The type of the None singleton.
Methods
def as_dict(self) ‑> dict[str, typing.Any]-
Expand source code
def as_dict(self) -> dict[str, Any]: """Recast this object into a dictionary.""" return { k: v for k, v in { "defaultValue": self.default_value, "displayName": self.display_name, "help": self.help, "hint": self.hint, "name": self.name, "required": self.required, "type": self.parameter_type, "validation": self.validation, }.items() if v is not None }Recast this object into a dictionary.
class EmbeddingProviderToken (accepted: str, forwarded: str)-
Expand source code
@dataclass
class EmbeddingProviderToken:
    """
    A representation of a "token", that is a specific secret string, needed by
    an embedding model; this models a part of the response from the
    'findEmbeddingProviders' Data API endpoint.

    Attributes:
        accepted: the name of this "token" as seen by the Data API. This is the
            name that should be used in the clients when supplying the secret,
            whether as header or by shared-secret.
        forwarded: the name used by the API when issuing the embedding request
            to the embedding provider. This is of no direct interest for the
            Data API user.
    """

    accepted: str
    forwarded: str

    def __repr__(self) -> str:
        return f"EmbeddingProviderToken('{self.accepted}')"

    def as_dict(self) -> dict[str, Any]:
        """Recast this object into a dictionary."""
        return {"accepted": self.accepted, "forwarded": self.forwarded}

    @staticmethod
    def _from_dict(raw_dict: dict[str, Any]) -> EmbeddingProviderToken:
        """
        Create an instance of EmbeddingProviderToken from a dictionary
        such as one from the Data API.
        """
        extra_keys = raw_dict.keys() - {"accepted", "forwarded"}
        if extra_keys:
            warnings.warn(
                "Unexpected key(s) encountered parsing a dictionary into "
                f"an `EmbeddingProviderToken`: '{','.join(sorted(extra_keys))}'"
            )
        return EmbeddingProviderToken(
            accepted=raw_dict["accepted"],
            forwarded=raw_dict["forwarded"],
        )
Attributes
accepted- the name of this "token" as seen by the Data API. This is the name that should be used in the clients when supplying the secret, whether as header or by shared-secret.
forwarded- the name used by the API when issuing the embedding request to the embedding provider. This is of no direct interest for the Data API user.
Instance variables
var accepted : str-
The type of the None singleton.
var forwarded : str-
The type of the None singleton.
Methods
def as_dict(self) ‑> dict[str, typing.Any]-
Expand source code
def as_dict(self) -> dict[str, Any]: """Recast this object into a dictionary.""" return { "accepted": self.accepted, "forwarded": self.forwarded, }Recast this object into a dictionary.
class FindEmbeddingProvidersResult (embedding_providers: dict[str, EmbeddingProvider],
raw_info: dict[str, Any] | None)-
Expand source code
@dataclass
class FindEmbeddingProvidersResult:
    """
    A representation of the whole response from the 'findEmbeddingProviders'
    Data API endpoint.

    Attributes:
        embedding_providers: a dictionary of provider names to
            EmbeddingProvider objects.
        raw_info: a (nested) dictionary containing the original full
            response from the endpoint.
    """

    embedding_providers: dict[str, EmbeddingProvider]
    raw_info: dict[str, Any] | None

    def __repr__(self) -> str:
        provider_names = ", ".join(sorted(self.embedding_providers.keys()))
        return (
            "FindEmbeddingProvidersResult(embedding_providers="
            f"{provider_names})"
        )

    def as_dict(self) -> dict[str, Any]:
        """Recast this object into a dictionary."""
        return {
            "embeddingProviders": {
                prov_name: prov_obj.as_dict()
                for prov_name, prov_obj in self.embedding_providers.items()
            },
        }

    @staticmethod
    def _from_dict(raw_dict: dict[str, Any]) -> FindEmbeddingProvidersResult:
        """
        Create an instance of FindEmbeddingProvidersResult from a dictionary
        such as one from the Data API.
        """
        unknown_keys = raw_dict.keys() - {"embeddingProviders"}
        if unknown_keys:
            warnings.warn(
                "Unexpected key(s) encountered parsing a dictionary into "
                f"a `FindEmbeddingProvidersResult`: '{','.join(sorted(unknown_keys))}'"
            )
        return FindEmbeddingProvidersResult(
            raw_info=raw_dict,
            embedding_providers={
                prov_name: EmbeddingProvider._from_dict(prov_body)
                for prov_name, prov_body in raw_dict["embeddingProviders"].items()
            },
        )
Attributes
embedding_providers- a dictionary of provider names to EmbeddingProvider objects.
raw_info- a (nested) dictionary containing the original full response from the endpoint.
Instance variables
var embedding_providers : dict[str, EmbeddingProvider]-
The type of the None singleton.
var raw_info : dict[str, typing.Any] | None-
The type of the None singleton.
Methods
def as_dict(self) ‑> dict[str, typing.Any]-
Expand source code
def as_dict(self) -> dict[str, Any]:
    """Recast this object into a dictionary."""
    providers_payload = {
        prov_name: prov_obj.as_dict()
        for prov_name, prov_obj in self.embedding_providers.items()
    }
    return {"embeddingProviders": providers_payload}
class FindRerankingProvidersResult (reranking_providers: dict[str, RerankingProvider],
raw_info: dict[str, Any] | None)-
Expand source code
@dataclass
class FindRerankingProvidersResult:
    """
    A representation of the whole response from the 'findRerankingProviders'
    Data API endpoint.

    Attributes:
        reranking_providers: a dictionary of provider names to
            RerankingProvider objects.
        raw_info: a (nested) dictionary containing the original full
            response from the endpoint.
    """

    reranking_providers: dict[str, RerankingProvider]
    raw_info: dict[str, Any] | None

    def __repr__(self) -> str:
        provider_names = ", ".join(sorted(self.reranking_providers.keys()))
        return (
            "FindRerankingProvidersResult(reranking_providers="
            f"{provider_names})"
        )

    def as_dict(self) -> dict[str, Any]:
        """Recast this object into a dictionary."""
        providers_payload = {
            prov_name: prov_obj.as_dict()
            for prov_name, prov_obj in self.reranking_providers.items()
        }
        return {"rerankingProviders": providers_payload}

    @classmethod
    def _from_dict(cls, raw_dict: dict[str, Any]) -> FindRerankingProvidersResult:
        """
        Create an instance of FindRerankingProvidersResult from a dictionary
        such as one from the Data API.
        """
        _warn_residual_keys(cls, raw_dict, {"rerankingProviders"})
        return FindRerankingProvidersResult(
            raw_info=raw_dict,
            reranking_providers={
                prov_name: RerankingProvider._from_dict(prov_body)
                for prov_name, prov_body in raw_dict["rerankingProviders"].items()
            },
        )
Attributes
reranking_providers- a dictionary of provider names to RerankingProvider objects.
raw_info- a (nested) dictionary containing the original full response from the endpoint.
Instance variables
var raw_info : dict[str, typing.Any] | None-
The type of the None singleton.
var reranking_providers : dict[str, RerankingProvider]-
The type of the None singleton.
Methods
def as_dict(self) ‑> dict[str, typing.Any]-
Expand source code
def as_dict(self) -> dict[str, Any]:
    """Recast this object into a dictionary."""
    providers_payload = {
        prov_name: prov_obj.as_dict()
        for prov_name, prov_obj in self.reranking_providers.items()
    }
    return {"rerankingProviders": providers_payload}
class ListTableDefinition (columns: dict[str, TableColumnTypeDescriptor],
primary_key: TablePrimaryKeyDescriptor)-
Expand source code
@dataclass
class ListTableDefinition:
    """
    A structure expressing the definition ("schema") of a table the way the
    Data API describes it. This is the returned object when querying the
    Data API about table metadata.

    This class differs from `CreateTableDefinition`, used when creating tables:
    this one can also describe tables with unsupported features, which could
    not be created through the Data API.

    Attributes:
        columns: a map from column names to their type definition object.
        primary_key: a specification of the primary key for the table.
    """

    columns: dict[str, TableColumnTypeDescriptor]
    primary_key: TablePrimaryKeyDescriptor

    def __repr__(self) -> str:
        description_pieces = [
            f"columns=[{','.join(self.columns.keys())}]",
            f"primary_key={self.primary_key}",
        ]
        # all pieces are always present; filter kept for symmetry with siblings
        rendered = ", ".join(pc for pc in description_pieces if pc is not None)
        return f"{self.__class__.__name__}({rendered})"

    def as_dict(self) -> dict[str, Any]:
        """Recast this object into a dictionary."""
        candidate_payload = {
            "columns": {
                col_name: col_desc.as_dict()
                for col_name, col_desc in self.columns.items()
            },
            "primaryKey": self.primary_key.as_dict(),
        }
        return {k: v for k, v in candidate_payload.items() if v is not None}

    @classmethod
    def _from_dict(cls, raw_dict: dict[str, Any]) -> ListTableDefinition:
        """
        Create an instance of ListTableDefinition from a dictionary
        such as one from the Data API.
        """
        _warn_residual_keys(cls, raw_dict, {"columns", "primaryKey"})
        parsed_columns = {
            col_name: TableColumnTypeDescriptor.coerce(col_body)
            for col_name, col_body in raw_dict["columns"].items()
        }
        return ListTableDefinition(
            columns=parsed_columns,
            primary_key=TablePrimaryKeyDescriptor.coerce(raw_dict["primaryKey"]),
        )

    @classmethod
    def coerce(
        cls, raw_input: ListTableDefinition | dict[str, Any]
    ) -> ListTableDefinition:
        """
        Normalize the input, whether an object already or a plain dictionary
        of the right structure, into a ListTableDefinition.
        """
        if isinstance(raw_input, ListTableDefinition):
            return raw_input
        return cls._from_dict(raw_input)
This class differs from CreateTableDefinition, used when creating tables: this one can also describe tables with unsupported features, which could not be created through the Data API.
Attributes
columns- a map from column names to their type definition object.
primary_key- a specification of the primary key for the table.
Static methods
def coerce(raw_input: ListTableDefinition | dict[str, Any]) ‑> ListTableDefinition-
Normalize the input, whether an object already or a plain dictionary of the right structure, into a ListTableDefinition.
Instance variables
var columns : dict[str, TableColumnTypeDescriptor]-
The type of the None singleton.
var primary_key : TablePrimaryKeyDescriptor-
The type of the None singleton.
Methods
def as_dict(self) ‑> dict[str, typing.Any]-
Expand source code
def as_dict(self) -> dict[str, Any]:
    """Recast this object into a dictionary."""
    candidate_payload = {
        "columns": {
            col_name: col_desc.as_dict()
            for col_name, col_desc in self.columns.items()
        },
        "primaryKey": self.primary_key.as_dict(),
    }
    return {key: val for key, val in candidate_payload.items() if val is not None}
class ListTableDescriptor (name: str,
definition: ListTableDefinition,
raw_descriptor: dict[str, Any] | None)-
Expand source code
@dataclass
class ListTableDescriptor:
    """
    A structure expressing full description of a table as the Data API
    returns it, i.e. its name and its `definition` sub-structure.

    Attributes:
        name: the name of the table.
        definition: a ListTableDefinition instance.
        raw_descriptor: the raw response from the Data API.
    """

    name: str
    definition: ListTableDefinition
    raw_descriptor: dict[str, Any] | None

    def __repr__(self) -> str:
        description_pieces = [
            f"name={self.name!r}",
            f"definition={self.definition!r}",
        ]
        if self.raw_descriptor is not None:
            description_pieces.append("raw_descriptor=...")
        return f"{self.__class__.__name__}({', '.join(description_pieces)})"

    def as_dict(self) -> dict[str, Any]:
        """
        Recast this object into a dictionary.
        Empty `definition` will not be returned at all.
        """
        candidate_payload = {
            "name": self.name,
            "definition": self.definition.as_dict(),
        }
        # truthiness filter: an empty definition dict is dropped entirely
        return {key: val for key, val in candidate_payload.items() if val}

    @classmethod
    def _from_dict(cls, raw_dict: dict[str, Any]) -> ListTableDescriptor:
        """
        Create an instance of ListTableDescriptor from a dictionary
        such as one from the Data API.
        """
        _warn_residual_keys(cls, raw_dict, {"name", "definition"})
        return ListTableDescriptor(
            name=raw_dict["name"],
            definition=ListTableDefinition.coerce(raw_dict.get("definition") or {}),
            raw_descriptor=raw_dict,
        )

    @classmethod
    def coerce(
        cls, raw_input: ListTableDescriptor | dict[str, Any]
    ) -> ListTableDescriptor:
        """
        Normalize the input, whether an object already or a plain dictionary
        of the right structure, into a ListTableDescriptor.
        """
        if isinstance(raw_input, ListTableDescriptor):
            return raw_input
        return cls._from_dict(raw_input)
definition sub-structure.
Attributes
name- the name of the table.
definition- a ListTableDefinition instance.
raw_descriptor- the raw response from the Data API.
Static methods
def coerce(raw_input: ListTableDescriptor | dict[str, Any]) ‑> ListTableDescriptor-
Normalize the input, whether an object already or a plain dictionary of the right structure, into a ListTableDescriptor.
Instance variables
var definition : ListTableDefinition-
The type of the None singleton.
var name : str-
The type of the None singleton.
var raw_descriptor : dict[str, typing.Any] | None-
The type of the None singleton.
Methods
def as_dict(self) ‑> dict[str, typing.Any]-
Expand source code
def as_dict(self) -> dict[str, Any]: """ Recast this object into a dictionary. Empty `definition` will not be returned at all. """ return { k: v for k, v in { "name": self.name, "definition": self.definition.as_dict(), }.items() if v }Recast this object into a dictionary. Empty
definitionwill not be returned at all.
class ListTypeDescriptor (*,
udt_type: TableUDTColumnType | TableUnsupportedColumnType,
udt_name: str | None,
definition: CreateTypeDefinition | None,
api_support: TableAPISupportDescriptor | None)-
Expand source code
@dataclass
class ListTypeDescriptor:
    """
    A structure describing a user-defined type (UDT) stored on the database.

    This object is used for the items returned by the database `list_types`
    method. `ListTypeDescriptor` expresses all information received by the
    Data API, including (when provided) the UDT name as found on the database,
    the type definition, and possibly a sub-object detailing the allowed
    operations with the UDT.

    This object must be able to describe any item returned from the Data API:
    this means it can describe "unsupported" UDTs as well (i.e. those which
    have been created outside of the Data API). Unsupported UDTs lack some
    attributes compared to the fully-supported ones.

    Attributes:
        udt_type: a value of either the TableUDTColumnType or the
            TableUnsupportedColumnType enum, depending on the UDT support status.
        udt_name: the name of the UDT as is stored in the database
            (and in a keyspace).
        definition: the definition of the type, i.e. its fields and their types.
        api_support: a structure detailing what operations the type supports.
    """

    udt_type: TableUDTColumnType | TableUnsupportedColumnType
    udt_name: str | None
    definition: CreateTypeDefinition | None
    api_support: TableAPISupportDescriptor | None

    def __init__(
        self,
        *,
        udt_type: TableUDTColumnType | TableUnsupportedColumnType,
        udt_name: str | None,
        definition: CreateTypeDefinition | None,
        api_support: TableAPISupportDescriptor | None,
    ) -> None:
        self.udt_type = udt_type
        self.udt_name = udt_name
        self.definition = definition
        self.api_support = api_support

    def __repr__(self) -> str:
        if isinstance(self.udt_type, TableUnsupportedColumnType):
            return f"{self.__class__.__name__}({self.udt_type.value})"
        return f"{self.__class__.__name__}({self.udt_name}: {self.definition})"

    @staticmethod
    def _is_valid_dict(raw_dict: dict[str, Any]) -> bool:
        """
        Assess whether a dictionary can be converted into a ListTypeDescriptor.
        This can be used by e.g. the database `list_types` method to filter
        offending responses and issue warnings if needed.

        Returns:
            True if and only if the dict is valid, otherwise False.
        """
        required_fields = {"type", "apiSupport"}
        return all(fld in raw_dict for fld in required_fields)

    def as_dict(self) -> dict[str, Any]:
        """Recast this object into a dictionary."""
        candidate_entries = {
            "type": self.udt_type.value,
            "udtName": self.udt_name,
            "definition": None
            if self.definition is None
            else self.definition.as_dict(),
            "apiSupport": None
            if self.api_support is None
            else self.api_support.as_dict(),
        }
        return {k: v for k, v in candidate_entries.items() if v is not None}

    @classmethod
    def _from_dict(cls, raw_dict: dict[str, Any]) -> ListTypeDescriptor:
        """
        Create an instance of ListTypeDescriptor from a dictionary
        such as one from the Data API.
        """
        _warn_residual_keys(
            cls, raw_dict, {"type", "udtName", "definition", "apiSupport"}
        )
        # dispatch the "type" string to the supported or the unsupported enum
        _udt_type: TableUDTColumnType | TableUnsupportedColumnType
        if raw_dict["type"] in TableUDTColumnType:
            _udt_type = TableUDTColumnType.coerce(raw_dict["type"])
        else:
            _udt_type = TableUnsupportedColumnType.coerce(raw_dict["type"])
        _definition = (
            CreateTypeDefinition._from_dict(raw_dict["definition"])
            if "definition" in raw_dict
            else None
        )
        _api_support = (
            TableAPISupportDescriptor._from_dict(raw_dict["apiSupport"])
            if "apiSupport" in raw_dict
            else None
        )
        return ListTypeDescriptor(
            udt_type=_udt_type,
            udt_name=raw_dict.get("udtName"),
            definition=_definition,
            api_support=_api_support,
        )

    @classmethod
    def coerce(
        cls, raw_input: ListTypeDescriptor | dict[str, Any]
    ) -> ListTypeDescriptor:
        """
        Normalize the input, whether an object already or a plain dictionary
        of the right structure, into a ListTypeDescriptor.
        """
        if isinstance(raw_input, ListTypeDescriptor):
            return raw_input
        return cls._from_dict(raw_input)
This object is used for the items returned by the database list_types method. ListTypeDescriptor expresses all information received by the Data API, including (when provided) the UDT name as found on the database, the type definition, and possibly a sub-object detailing the allowed operations with the UDT.
This object must be able to describe any item returned from the Data API: this means it can describe "unsupported" UDTs as well (i.e. those which have been created outside of the Data API). Unsupported UDTs lack some attributes compared to the fully-supported ones.
Attributes
udt_type- a value of either the TableUDTColumnType or the TableUnsupportedColumnType enum, depending on the UDT support status.
udt_name- the name of the UDT as is stored in the database (and in a keyspace).
definition- the definition of the type, i.e. its fields and their types.
api_support- a structure detailing what operations the type supports.
Static methods
def coerce(raw_input: ListTypeDescriptor | dict[str, Any]) ‑> ListTypeDescriptor-
Normalize the input, whether an object already or a plain dictionary of the right structure, into a ListTypeDescriptor.
Instance variables
var api_support : TableAPISupportDescriptor | None-
The type of the None singleton.
var definition : CreateTypeDefinition | None-
The type of the None singleton.
var udt_name : str | None-
The type of the None singleton.
var udt_type : TableUDTColumnType | TableUnsupportedColumnType-
The type of the None singleton.
Methods
def as_dict(self) ‑> dict[str, typing.Any]-
Expand source code
def as_dict(self) -> dict[str, Any]: """Recast this object into a dictionary.""" return { k: v for k, v in { "type": self.udt_type.value, "udtName": self.udt_name, "definition": self.definition.as_dict() if self.definition is not None else None, "apiSupport": self.api_support.as_dict() if self.api_support is not None else None, }.items() if v is not None }Recast this object into a dictionary.
class RerankServiceOptions (provider: str | None,
model_name: str | None,
authentication: dict[str, Any] | None = None,
parameters: dict[str, Any] | None = None)-
Expand source code
@dataclass
class RerankServiceOptions:
    """
    The "rerank.service" component of the collection options.
    See the Data API specifications for allowed values.

    Attributes:
        provider: the name of a service provider for reranking.
        model_name: the name of a specific model for use by the service.
        authentication: a key-value dictionary for the "authentication"
            specification, if any, in the reranking service options.
        parameters: a key-value dictionary for the "parameters" specification,
            if any, in the reranking service options.
    """

    provider: str | None
    model_name: str | None
    authentication: dict[str, Any] | None = None
    parameters: dict[str, Any] | None = None

    def as_dict(self) -> dict[str, Any]:
        """Recast this object into a dictionary."""
        full_payload = {
            "provider": self.provider,
            "modelName": self.model_name,
            "authentication": self.authentication,
            "parameters": self.parameters,
        }
        # None-valued entries are omitted from the serialized form
        return {key: val for key, val in full_payload.items() if val is not None}

    @staticmethod
    def _from_dict(
        raw_dict: dict[str, Any] | None,
    ) -> RerankServiceOptions | None:
        """
        Create an instance of RerankServiceOptions from a dictionary
        such as one from the Data API.
        """
        if raw_dict is None:
            return None
        return RerankServiceOptions(
            provider=raw_dict.get("provider"),
            model_name=raw_dict.get("modelName"),
            authentication=raw_dict.get("authentication"),
            parameters=raw_dict.get("parameters"),
        )

    @staticmethod
    def coerce(
        raw_input: RerankServiceOptions | dict[str, Any] | None,
    ) -> RerankServiceOptions | None:
        """
        Normalize the input into a RerankServiceOptions (or None): instances
        pass through unchanged, dictionaries are parsed, None maps to None.
        """
        if isinstance(raw_input, RerankServiceOptions):
            return raw_input
        return RerankServiceOptions._from_dict(raw_input)
Attributes
provider- the name of a service provider for reranking.
model_name- the name of a specific model for use by the service.
authentication- a key-value dictionary for the "authentication" specification, if any, in the reranking service options.
parameters- a key-value dictionary for the "parameters" specification, if any, in the reranking service options.
Static methods
def coerce(raw_input: RerankServiceOptions | dict[str, Any] | None) ‑> RerankServiceOptions | None-
Expand source code
@staticmethod
def coerce(
    raw_input: RerankServiceOptions | dict[str, Any] | None,
) -> RerankServiceOptions | None:
    """
    Normalize the input into a RerankServiceOptions (or None): instances
    pass through unchanged, dictionaries are parsed, None maps to None.
    """
    if isinstance(raw_input, RerankServiceOptions):
        return raw_input
    return RerankServiceOptions._from_dict(raw_input)
Instance variables
var authentication : dict[str, typing.Any] | None-
The type of the None singleton.
var model_name : str | None-
The type of the None singleton.
var parameters : dict[str, typing.Any] | None-
The type of the None singleton.
var provider : str | None-
The type of the None singleton.
Methods
def as_dict(self) ‑> dict[str, typing.Any]-
Expand source code
def as_dict(self) -> dict[str, Any]: """Recast this object into a dictionary.""" return { k: v for k, v in { "provider": self.provider, "modelName": self.model_name, "authentication": self.authentication, "parameters": self.parameters, }.items() if v is not None }Recast this object into a dictionary.
class RerankingAPIModelSupport (status: str, message: str | None)-
Expand source code
@dataclass
class RerankingAPIModelSupport:
    """
    A representation of the API support status for a reranking model.

    Attributes:
        status: a string describing the support status.
        message: an optional string message alongside the status.
    """

    status: str
    message: str | None

    def __repr__(self) -> str:
        return f"RerankingAPIModelSupport({self.status})"

    def as_dict(self) -> dict[str, Any]:
        """Recast this object into a dictionary."""
        pairs = [("status", self.status), ("message", self.message)]
        # omit entries not set (e.g. a missing message)
        return {key: value for key, value in pairs if value is not None}

    @classmethod
    def _from_dict(cls, raw_dict: dict[str, Any]) -> RerankingAPIModelSupport:
        """
        Create an instance of RerankingAPIModelSupport from a dictionary
        such as one from the Data API.
        """
        _warn_residual_keys(cls, raw_dict, {"status", "message"})
        # an absent/empty status defaults to full support
        return RerankingAPIModelSupport(
            status=raw_dict.get("status") or "SUPPORTED",
            message=raw_dict.get("message"),
        )
Attributes
status- a string describing the support status.
message- an optional string message alongside the status.
Instance variables
var message : str | None-
The type of the None singleton.
var status : str-
The type of the None singleton.
Methods
def as_dict(self) ‑> dict[str, typing.Any]-
Expand source code
def as_dict(self) -> dict[str, Any]: """Recast this object into a dictionary.""" return { k: v for k, v in { "status": self.status, "message": self.message, }.items() if v is not None }Recast this object into a dictionary.
class RerankingProvider (is_default: bool,
display_name: str | None,
supported_authentication: dict[str, RerankingProviderAuthentication],
models: list[RerankingProviderModel],
parameters: list[RerankingProviderParameter],
url: str | None)-
Expand source code
@dataclass
class RerankingProvider:
    """
    A representation of a reranking provider, as returned by the
    'findRerankingProviders' Data API endpoint.

    Attributes:
        is_default: a flag marking this provider as the default one.
        display_name: a version of the provider name for display and pretty
            printing. Not to be used when issuing vectorize API requests
            (for the latter, it is the key in the providers dictionary that
            is required).
        models: a list of `RerankingProviderModel` objects pertaining to
            the provider.
        parameters: a list of `RerankingProviderParameter` objects common to
            all models for this provider.
        supported_authentication: a dictionary of the authentication modes
            for this provider. Note that disabled modes may still appear
            in this map, albeit with the `enabled` property set to False.
        url: a string template for the URL used by the Data API when issuing
            the request toward the reranking provider. This is of no direct
            concern to the Data API user.
    """

    is_default: bool
    display_name: str | None
    supported_authentication: dict[str, RerankingProviderAuthentication]
    models: list[RerankingProviderModel]
    parameters: list[RerankingProviderParameter]
    url: str | None

    def __repr__(self) -> str:
        default_marker = "<Default> " if self.is_default else ""
        return (
            f"RerankingProvider({default_marker}display_name='{self.display_name}', "
            f"models={self.models})"
        )

    def as_dict(self) -> dict[str, Any]:
        """Recast this object into a dictionary."""
        result: dict[str, Any] = {
            "isDefault": self.is_default,
            "displayName": self.display_name,
            "models": [model.as_dict() for model in self.models],
            "supportedAuthentication": {
                auth_name: auth.as_dict()
                for auth_name, auth in self.supported_authentication.items()
            },
        }
        # the following two entries are emitted only when nonempty
        if self.parameters:
            result["parameters"] = [param.as_dict() for param in self.parameters]
        if self.url:
            result["url"] = self.url
        return result

    @classmethod
    def _from_dict(cls, raw_dict: dict[str, Any]) -> RerankingProvider:
        """
        Create an instance of RerankingProvider from a dictionary such as
        one from the Data API.
        """
        _warn_residual_keys(
            cls,
            raw_dict,
            {
                "isDefault",
                "displayName",
                "models",
                "parameters",
                "supportedAuthentication",
                "url",
            },
        )
        return RerankingProvider(
            is_default=raw_dict["isDefault"],
            display_name=raw_dict["displayName"],
            models=[
                RerankingProviderModel._from_dict(model_dict)
                for model_dict in raw_dict["models"]
            ],
            parameters=[
                RerankingProviderParameter._from_dict(param_dict)
                for param_dict in raw_dict.get("parameters") or []
            ],
            supported_authentication={
                sa_name: RerankingProviderAuthentication._from_dict(sa_dict)
                for sa_name, sa_dict in raw_dict["supportedAuthentication"].items()
            },
            url=raw_dict.get("url"),
        )
Attributes
display_name- a version of the provider name for display and pretty printing. Not to be used when issuing vectorize API requests (for the latter, it is the key in the providers dictionary that is required).
models - a list of RerankingProviderModel objects pertaining to the provider.
parameters - a list of RerankingProviderParameter objects common to all models for this provider.
supported_authentication - a dictionary of the authentication modes for this provider. Note that disabled modes may still appear in this map, albeit with the enabled property set to False.
url - a string template for the URL used by the Data API when issuing the request toward the reranking provider. This is of no direct concern to the Data API user.
Instance variables
var display_name : str | None-
The type of the None singleton.
var is_default : bool-
The type of the None singleton.
var models : list[RerankingProviderModel]-
The type of the None singleton.
var parameters : list[RerankingProviderParameter]-
The type of the None singleton.
var supported_authentication : dict[str, RerankingProviderAuthentication]-
The type of the None singleton.
var url : str | None-
The type of the None singleton.
Methods
def as_dict(self) ‑> dict[str, typing.Any]-
Expand source code
def as_dict(self) -> dict[str, Any]: """Recast this object into a dictionary.""" return dict( [ pair for pair in [ ("isDefault", self.is_default), ("displayName", self.display_name), ("models", [model.as_dict() for model in self.models]), ( "supportedAuthentication", { sa_name: sa_value.as_dict() for sa_name, sa_value in self.supported_authentication.items() }, ), ( "parameters", [parameter.as_dict() for parameter in self.parameters], ) if self.parameters else None, ("url", self.url) if self.url else None, ] if pair is not None ] )Recast this object into a dictionary.
class RerankingProviderAuthentication (enabled: bool,
tokens: list[RerankingProviderToken])-
Expand source code
@dataclass
class RerankingProviderAuthentication:
    """
    A representation of an authentication mode for using a reranking model,
    modeling the corresponding part of the response returned by the
    'findRerankingProviders' Data API endpoint
    (namely "supportedAuthentication").

    Attributes:
        enabled: whether this authentication mode is available for
            a given model.
        tokens: a list of `RerankingProviderToken` objects, detailing
            the secrets required for the authentication mode.
    """

    enabled: bool
    tokens: list[RerankingProviderToken]

    def __repr__(self) -> str:
        token_list = ",".join(str(token) for token in self.tokens)
        return (
            f"RerankingProviderAuthentication(enabled={self.enabled}, "
            f"tokens={token_list})"
        )

    def as_dict(self) -> dict[str, Any]:
        """Recast this object into a dictionary."""
        return {
            "enabled": self.enabled,
            "tokens": [token.as_dict() for token in self.tokens],
        }

    @classmethod
    def _from_dict(cls, raw_dict: dict[str, Any]) -> RerankingProviderAuthentication:
        """
        Create an instance of RerankingProviderAuthentication from
        a dictionary such as one from the Data API.
        """
        _warn_residual_keys(cls, raw_dict, {"enabled", "tokens"})
        return RerankingProviderAuthentication(
            enabled=raw_dict["enabled"],
            tokens=[
                RerankingProviderToken._from_dict(token_dict)
                for token_dict in raw_dict["tokens"]
            ],
        )
Attributes
enabled- whether this authentication mode is available for a given model.
tokens - a list of RerankingProviderToken objects, detailing the secrets required for the authentication mode.
Instance variables
var enabled : bool-
The type of the None singleton.
var tokens : list[RerankingProviderToken]-
The type of the None singleton.
Methods
def as_dict(self) ‑> dict[str, typing.Any]-
Expand source code
def as_dict(self) -> dict[str, Any]: """Recast this object into a dictionary.""" return { "enabled": self.enabled, "tokens": [token.as_dict() for token in self.tokens], }Recast this object into a dictionary.
class RerankingProviderModel (name: str,
is_default: bool,
url: str | None,
properties: dict[str, Any] | None,
parameters: list[RerankingProviderParameter],
api_model_support: RerankingAPIModelSupport)-
Expand source code
@dataclass
class RerankingProviderModel:
    """
    A representation of a reranking model as returned by the
    'findRerankingProviders' Data API endpoint.

    Attributes:
        name: the model name as must be passed when issuing vectorize
            operations to the API.
        is_default: a flag set by the Data API to mark a reranking model
            as the default.
        url: an URL associated to invoking the reranking model.
        properties: a free-form dictionary with string keys,
            describing the model.
        parameters: a list of the `RerankingProviderParameter` objects
            the model admits.
        api_model_support: the status of API support for the model,
            in the form of a RerankingAPIModelSupport object.
    """

    name: str
    is_default: bool
    url: str | None
    properties: dict[str, Any] | None
    parameters: list[RerankingProviderParameter]
    api_model_support: RerankingAPIModelSupport

    def __repr__(self) -> str:
        default_marker = "<Default> " if self.is_default else ""
        return f"RerankingProviderModel({default_marker}name='{self.name}')"

    def as_dict(self) -> dict[str, Any]:
        """Recast this object into a dictionary."""
        result: dict[str, Any] = {
            "name": self.name,
            "isDefault": self.is_default,
            "url": self.url,
            "properties": self.properties,
        }
        # "parameters" appears only when nonempty, before "apiModelSupport"
        if self.parameters:
            result["parameters"] = [param.as_dict() for param in self.parameters]
        result["apiModelSupport"] = self.api_model_support.as_dict()
        return result

    @classmethod
    def _from_dict(cls, raw_dict: dict[str, Any]) -> RerankingProviderModel:
        """
        Create an instance of RerankingProviderModel from a dictionary
        such as one from the Data API.
        """
        _warn_residual_keys(
            cls,
            raw_dict,
            {
                "name",
                "isDefault",
                "url",
                "properties",
                "parameters",
                "apiModelSupport",
            },
        )
        return RerankingProviderModel(
            name=raw_dict["name"],
            is_default=raw_dict["isDefault"],
            url=raw_dict.get("url"),
            properties=raw_dict["properties"],
            parameters=[
                RerankingProviderParameter._from_dict(param_dict)
                for param_dict in raw_dict.get("parameters") or []
            ],
            api_model_support=RerankingAPIModelSupport._from_dict(
                raw_dict.get("apiModelSupport") or {},
            ),
        )
Attributes
name- the model name as must be passed when issuing vectorize operations to the API.
is_default- a flag set by the Data API to mark a reranking model as the default.
url- an URL associated to invoking the reranking model.
properties- a free-form dictionary with string keys, describing the model.
parameters - a list of the RerankingProviderParameter objects the model admits.
api_model_support - the status of API support for the model, in the form of a RerankingAPIModelSupport object.
Instance variables
var api_model_support : RerankingAPIModelSupport-
The type of the None singleton.
var is_default : bool-
The type of the None singleton.
var name : str-
The type of the None singleton.
var parameters : list[RerankingProviderParameter]-
The type of the None singleton.
var properties : dict[str, typing.Any] | None-
The type of the None singleton.
var url : str | None-
The type of the None singleton.
Methods
def as_dict(self) ‑> dict[str, typing.Any]-
Expand source code
def as_dict(self) -> dict[str, Any]: """Recast this object into a dictionary.""" return dict( [ pair for pair in [ ("name", self.name), ("isDefault", self.is_default), ("url", self.url), ("properties", self.properties), ( "parameters", [parameter.as_dict() for parameter in self.parameters], ) if self.parameters else None, ("apiModelSupport", self.api_model_support.as_dict()), ] if pair is not None ] )Recast this object into a dictionary.
class RerankingProviderParameter (default_value: Any,
display_name: str | None,
help: str | None,
hint: str | None,
name: str,
required: bool,
parameter_type: str,
validation: dict[str, Any])-
Expand source code
@dataclass
class RerankingProviderParameter:
    """
    A representation of a parameter as returned by the
    'findRerankingProviders' Data API endpoint.

    Attributes:
        default_value: the default value for the parameter.
        help: a textual description of the parameter.
        name: the name to use when passing the parameter for
            vectorize operations.
        required: whether the parameter is required or not.
        parameter_type: a textual description of the data type
            for the parameter.
        validation: a dictionary describing a parameter-specific
            validation policy.
    """

    default_value: Any
    display_name: str | None
    help: str | None
    hint: str | None
    name: str
    required: bool
    parameter_type: str
    validation: dict[str, Any]

    def __repr__(self) -> str:
        return f"RerankingProviderParameter(name='{self.name}')"

    def as_dict(self) -> dict[str, Any]:
        """Recast this object into a dictionary."""
        candidates: dict[str, Any] = {
            "defaultValue": self.default_value,
            "displayName": self.display_name,
            "help": self.help,
            "hint": self.hint,
            "name": self.name,
            "required": self.required,
            "type": self.parameter_type,
            "validation": self.validation,
        }
        # drop unset (None) entries; note False/empty values are kept
        return {key: value for key, value in candidates.items() if value is not None}

    @classmethod
    def _from_dict(cls, raw_dict: dict[str, Any]) -> RerankingProviderParameter:
        """
        Create an instance of RerankingProviderParameter from a dictionary
        such as one from the Data API.
        """
        _warn_residual_keys(
            cls,
            raw_dict,
            {
                "defaultValue",
                "displayName",
                "help",
                "hint",
                "name",
                "required",
                "type",
                "validation",
            },
        )
        return RerankingProviderParameter(
            default_value=raw_dict.get("defaultValue"),
            display_name=raw_dict.get("displayName"),
            help=raw_dict.get("help"),
            hint=raw_dict.get("hint"),
            name=raw_dict["name"],
            required=raw_dict["required"],
            parameter_type=raw_dict["type"],
            validation=raw_dict["validation"],
        )
Attributes
default_value- the default value for the parameter.
help- a textual description of the parameter.
name- the name to use when passing the parameter for vectorize operations.
required- whether the parameter is required or not.
parameter_type- a textual description of the data type for the parameter.
validation- a dictionary describing a parameter-specific validation policy.
Instance variables
var default_value : Any-
The type of the None singleton.
var display_name : str | None-
The type of the None singleton.
var help : str | None-
The type of the None singleton.
var hint : str | None-
The type of the None singleton.
var name : str-
The type of the None singleton.
var parameter_type : str-
The type of the None singleton.
var required : bool-
The type of the None singleton.
var validation : dict[str, typing.Any]-
The type of the None singleton.
Methods
def as_dict(self) ‑> dict[str, typing.Any]-
Expand source code
def as_dict(self) -> dict[str, Any]: """Recast this object into a dictionary.""" return { k: v for k, v in { "defaultValue": self.default_value, "displayName": self.display_name, "help": self.help, "hint": self.hint, "name": self.name, "required": self.required, "type": self.parameter_type, "validation": self.validation, }.items() if v is not None }Recast this object into a dictionary.
class RerankingProviderToken (accepted: str, forwarded: str)-
Expand source code
@dataclass
class RerankingProviderToken:
    """
    A representation of a "token", that is a specific secret string,
    needed by a reranking model; this models a part of the response
    from the 'findRerankingProviders' Data API endpoint.

    Attributes:
        accepted: the name of this "token" as seen by the Data API. This is
            the name that should be used in the clients when supplying the
            secret, whether as header or by shared-secret.
        forwarded: the name used by the API when issuing the reranking
            request to the reranking provider. This is of no direct
            interest for the Data API user.
    """

    accepted: str
    forwarded: str

    def __repr__(self) -> str:
        return f"RerankingProviderToken('{self.accepted}')"

    def as_dict(self) -> dict[str, Any]:
        """Recast this object into a dictionary."""
        return {"accepted": self.accepted, "forwarded": self.forwarded}

    @classmethod
    def _from_dict(cls, raw_dict: dict[str, Any]) -> RerankingProviderToken:
        """
        Create an instance of RerankingProviderToken from a dictionary
        such as one from the Data API.
        """
        _warn_residual_keys(cls, raw_dict, {"accepted", "forwarded"})
        return RerankingProviderToken(
            accepted=raw_dict["accepted"],
            forwarded=raw_dict["forwarded"],
        )
Attributes
accepted- the name of this "token" as seen by the Data API. This is the name that should be used in the clients when supplying the secret, whether as header or by shared-secret.
forwarded- the name used by the API when issuing the reranking request to the reranking provider. This is of no direct interest for the Data API user.
Instance variables
var accepted : str-
The type of the None singleton.
var forwarded : str-
The type of the None singleton.
Methods
def as_dict(self) ‑> dict[str, typing.Any]-
Expand source code
def as_dict(self) -> dict[str, Any]: """Recast this object into a dictionary.""" return { "accepted": self.accepted, "forwarded": self.forwarded, }Recast this object into a dictionary.
class TableAPIIndexSupportDescriptor (cql_definition: str, create_index: bool, filter: bool)-
Expand source code
@dataclass
class TableAPIIndexSupportDescriptor:
    """
    Represents the additional information returned by the Data API when
    describing an index that has 'unsupported' status. Unsupported indexes
    may have been created by means other than the Data API (e.g. CQL direct
    interaction with the database).

    The Data API reports these indexes along with the others when listing
    the indexes, and provides the information marshaled in this object to
    detail which level of support the index has (for instance, it can be
    a partial support where the index can still be used to filter reads).

    Attributes:
        cql_definition: a free-form string containing the CQL definition
            for the index.
        create_index: whether such an index can be created through
            the Data API.
        filter: whether the index can be involved in a Data API
            filter clause.
    """

    cql_definition: str
    create_index: bool
    filter: bool

    def __repr__(self) -> str:
        pieces = [
            f'"{self.cql_definition}"',
            f"create_index={self.create_index}",
            f"filter={self.filter}",
        ]
        return f"{self.__class__.__name__}({', '.join(pieces)})"

    def as_dict(self) -> dict[str, Any]:
        """Recast this object into a dictionary."""
        return {
            "cqlDefinition": self.cql_definition,
            "createIndex": self.create_index,
            "filter": self.filter,
        }

    @classmethod
    def _from_dict(cls, raw_dict: dict[str, Any]) -> TableAPIIndexSupportDescriptor:
        """
        Create an instance of TableAPIIndexSupportDescriptor from
        a dictionary such as one from the Data API.
        """
        _warn_residual_keys(
            cls,
            raw_dict,
            {"cqlDefinition", "createIndex", "filter"},
        )
        return TableAPIIndexSupportDescriptor(
            cql_definition=raw_dict["cqlDefinition"],
            create_index=raw_dict["createIndex"],
            filter=raw_dict["filter"],
        )
The Data API reports these indexes along with the others when listing the indexes, and provides the information marshaled in this object to detail which level of support the index has (for instance, it can be a partial support where the index can still be used to filter reads).
Attributes
cql_definition- a free-form string containing the CQL definition for the index.
create_index- whether such an index can be created through the Data API.
filter- whether the index can be involved in a Data API filter clause.
Instance variables
var cql_definition : str-
The type of the None singleton.
var create_index : bool-
The type of the None singleton.
var filter : bool-
The type of the None singleton.
Methods
def as_dict(self) ‑> dict[str, typing.Any]-
Expand source code
def as_dict(self) -> dict[str, Any]: """Recast this object into a dictionary.""" return { "cqlDefinition": self.cql_definition, "createIndex": self.create_index, "filter": self.filter, }Recast this object into a dictionary.
class TableAPISupportDescriptor (cql_definition: str, create_table: bool, insert: bool, filter: bool, read: bool)-
Expand source code
@dataclass
class TableAPISupportDescriptor:
    """
    Represents the additional support information returned by the Data API
    when describing columns of a table. Some columns indeed require a
    detailed description of what operations are supported on them - this
    includes, but is not limited to, columns created by means other than
    the Data API (e.g. CQL direct interaction with the database).

    When the Data API reports these columns (in listing the tables and
    their metadata), it provides the information marshaled in this object
    to detail which level of support the column has (for instance, it can
    be a partial support whereby the column is readable by the API but
    not writable).

    Attributes:
        cql_definition: a free-form string containing the CQL definition
            for the column.
        create_table: whether a column of this nature can be used in API
            table creation.
        insert: whether a column of this nature can be written through
            the API.
        filter: whether a column of this nature can be used for filtering
            with API find.
        read: whether a column of this nature can be read through the API.
    """

    cql_definition: str
    create_table: bool
    insert: bool
    filter: bool
    read: bool

    def __repr__(self) -> str:
        pieces = [
            f'"{self.cql_definition}"',
            f"create_table={self.create_table}",
            f"insert={self.insert}",
            f"filter={self.filter}",
            f"read={self.read}",
        ]
        return f"{self.__class__.__name__}({', '.join(pieces)})"

    def as_dict(self) -> dict[str, Any]:
        """Recast this object into a dictionary."""
        return {
            "cqlDefinition": self.cql_definition,
            "createTable": self.create_table,
            "insert": self.insert,
            "filter": self.filter,
            "read": self.read,
        }

    @classmethod
    def _from_dict(cls, raw_dict: dict[str, Any]) -> TableAPISupportDescriptor:
        """
        Create an instance of TableAPISupportDescriptor from a dictionary
        such as one from the Data API.
        """
        _warn_residual_keys(
            cls,
            raw_dict,
            {"cqlDefinition", "createTable", "insert", "filter", "read"},
        )
        return TableAPISupportDescriptor(
            cql_definition=raw_dict["cqlDefinition"],
            create_table=raw_dict["createTable"],
            insert=raw_dict["insert"],
            filter=raw_dict["filter"],
            read=raw_dict["read"],
        )
When the Data API reports these columns (in listing the tables and their metadata), it provides the information marshaled in this object to detail which level of support the column has (for instance, it can be a partial support whereby the column is readable by the API but not writable).
Attributes
cql_definition- a free-form string containing the CQL definition for the column.
create_table- whether a column of this nature can be used in API table creation.
insert- whether a column of this nature can be written through the API.
filter- whether a column of this nature can be used for filtering with API find.
read- whether a column of this nature can be read through the API.
Instance variables
var cql_definition : str-
The type of the None singleton.
var create_table : bool-
The type of the None singleton.
var filter : bool-
The type of the None singleton.
var insert : bool-
The type of the None singleton.
var read : bool-
The type of the None singleton.
Methods
def as_dict(self) ‑> dict[str, typing.Any]-
Expand source code
def as_dict(self) -> dict[str, Any]: """Recast this object into a dictionary.""" return { "cqlDefinition": self.cql_definition, "createTable": self.create_table, "insert": self.insert, "filter": self.filter, "read": self.read, }Recast this object into a dictionary.
class TableBaseIndexDefinition (column: str | dict[str, str],
_index_type: TableIndexType)-
Expand source code
@dataclass
class TableBaseIndexDefinition(ABC):
    """
    An object describing an index definition, including the name of the
    indexed column and the index options if there are any.
    This is an abstract class common to the various types of index:
    see the appropriate subclass for more details.

    Attributes:
        column: the name of the indexed column. For an index on a map
            column, it can be an object in a format such as
            {"column": "$values"} and similar.
    """

    column: str | dict[str, str]
    _index_type: TableIndexType

    @abstractmethod
    def as_dict(self) -> dict[str, Any]: ...

    @classmethod
    def _from_dict(
        cls,
        raw_input: dict[str, Any],
    ) -> TableBaseIndexDefinition:
        """
        Create an instance of TableBaseIndexDefinition from a dictionary
        such as one from the Data API. This method inspects the input
        dictionary to select the right class to use so as to represent
        the index definition.
        """
        if "options" in raw_input:
            index_options = raw_input["options"]
            # a "metric" option implies a vector index; "analyzer" a text one
            if "metric" in index_options:
                return TableVectorIndexDefinition.coerce(raw_input)
            if "analyzer" in index_options:
                return TableTextIndexDefinition.coerce(raw_input)
            return TableIndexDefinition.coerce(raw_input)
        # no options: either an unsupported index or a plain one
        if raw_input["column"] == "UNKNOWN" and "apiSupport" in raw_input:
            return TableUnsupportedIndexDefinition.coerce(raw_input)
        return TableIndexDefinition.coerce(raw_input)
Attributes
column- the name of the indexed column. For an index on a map column, it can be an object in a format such as {"column": "$values"} and similar.
Ancestors
- abc.ABC
Subclasses
- TableIndexDefinition
- TableTextIndexDefinition
- TableUnsupportedIndexDefinition
- TableVectorIndexDefinition
Instance variables
var column : str | dict[str, str]-
The type of the None singleton.
Methods
def as_dict(self) ‑> dict[str, typing.Any]-
Expand source code
@abstractmethod def as_dict(self) -> dict[str, Any]: ...
class TableIndexDefinition (column: str | dict[str, str],
options: TableIndexOptions | UnsetType = (unset))-
Expand source code
@dataclass
class TableIndexDefinition(TableBaseIndexDefinition):
    """
    An object describing a regular (non-vector) index definition,
    including the name of the indexed column and the index options.

    Attributes:
        column: the name of the indexed column. For an index on a map
            column, it can be an object in a format such as
            {"column": "$values"} and similar.
        options: a `TableIndexOptions` detailing the index configuration.
    """

    options: TableIndexOptions

    def __init__(
        self,
        column: str | dict[str, str],
        options: TableIndexOptions | UnsetType = _UNSET,
    ) -> None:
        self._index_type = TableIndexType.REGULAR
        self.column = column
        # an unset `options` argument becomes a default TableIndexOptions
        if isinstance(options, UnsetType):
            self.options = TableIndexOptions()
        else:
            self.options = options

    def __repr__(self) -> str:
        return f"{self.__class__.__name__}({self.column}, options={self.options})"

    def as_dict(self) -> dict[str, Any]:
        """Recast this object into a dictionary."""
        pairs = {
            "column": self.column,
            "options": self.options.as_dict(),
        }
        # drop falsy entries (e.g. an empty options dictionary)
        return {key: value for key, value in pairs.items() if value}

    @classmethod
    def _from_dict(
        cls,
        raw_dict: dict[str, Any],
    ) -> TableIndexDefinition:
        """
        Create an instance of TableIndexDefinition from a dictionary
        such as one from the Data API.
        """
        _warn_residual_keys(cls, raw_dict, {"column", "options"})
        return TableIndexDefinition(
            column=raw_dict["column"],
            options=TableIndexOptions.coerce(raw_dict.get("options") or {}),
        )

    @classmethod
    def coerce(
        cls,
        raw_input: TableIndexDefinition | dict[str, Any],
    ) -> TableIndexDefinition:
        """
        Normalize the input, whether an object already or a plain
        dictionary of the right structure, into a TableIndexDefinition.
        """
        if isinstance(raw_input, TableIndexDefinition):
            return raw_input
        padded_input = {**{"options": {}}, **raw_input}
        return cls._from_dict(padded_input)
Attributes
column- the name of the indexed column. For an index on a map column, it can be an object in a format such as {"column": "$values"} and similar.
options - a TableIndexOptions detailing the index configuration.
Ancestors
- TableBaseIndexDefinition
- abc.ABC
Static methods
def coerce(raw_input: TableIndexDefinition | dict[str, Any]) ‑> TableIndexDefinition-
Normalize the input, whether an object already or a plain dictionary of the right structure, into a TableIndexDefinition.
Instance variables
var options : TableIndexOptions-
The type of the None singleton.
Methods
def as_dict(self) ‑> dict[str, typing.Any]-
Expand source code
def as_dict(self) -> dict[str, Any]: """Recast this object into a dictionary.""" return { k: v for k, v in { "column": self.column, "options": self.options.as_dict(), }.items() if v }Recast this object into a dictionary.
Inherited members
class TableIndexDescriptor (name: str,
definition: TableBaseIndexDefinition,
index_type: TableIndexType | str | UnsetType = (unset))-
Expand source code
@dataclass
class TableIndexDescriptor:
    """
    The top-level object describing a table index on a column.

    The hierarchical arrangement of `TableIndexDescriptor`, which contains
    a `TableBaseIndexDefinition` (plus possibly index options within the
    latter), is designed to mirror the shape of payloads and response
    about indexes in the Data API.

    Attributes:
        name: the name of the index. Index names are unique within a keyspace:
            hence, two tables in the same keyspace cannot use the same name for
            their indexes.
        definition: an appropriate concrete subclass of `TableBaseIndexDefinition`
            providing the detailed definition of the index.
        index_type: a value in the TableIndexType enum, describing the index type.
    """

    name: str
    definition: TableBaseIndexDefinition
    index_type: TableIndexType

    def __init__(
        self,
        name: str,
        definition: TableBaseIndexDefinition,
        index_type: TableIndexType | str | UnsetType = _UNSET,
    ) -> None:
        self.name = name
        self.definition = definition
        if isinstance(index_type, UnsetType):
            # No explicit index type passed: infer it from the definition.
            self.index_type = self.definition._index_type
        else:
            self.index_type = TableIndexType.coerce(index_type)

    def __repr__(self) -> str:
        not_null_pieces = [
            pc
            for pc in (
                self.name,
                f"definition={self.definition}",
                f"index_type={self.index_type.value}",
            )
            if pc is not None
        ]
        inner_desc = ", ".join(not_null_pieces)
        return f"{self.__class__.__name__}({inner_desc})"

    def as_dict(self) -> dict[str, Any]:
        """Recast this object into a dictionary."""
        return {
            "name": self.name,
            "definition": self.definition.as_dict(),
            "indexType": self.index_type.value,
        }

    @classmethod
    def _from_dict(cls, raw_dict: dict[str, Any]) -> TableIndexDescriptor:
        """
        Create an instance of TableIndexDescriptor from a dictionary
        such as one from the Data API.
        """
        _warn_residual_keys(cls, raw_dict, {"name", "definition", "indexType"})
        index_definition: TableBaseIndexDefinition
        idx_type = raw_dict.get("indexType")
        idx_def = raw_dict["definition"]
        # Dispatch on the declared index type to pick the right parser.
        if idx_type == TableIndexType.REGULAR.value:
            index_definition = TableIndexDefinition._from_dict(idx_def)
        elif idx_type == TableIndexType.VECTOR.value:
            index_definition = TableVectorIndexDefinition._from_dict(idx_def)
        elif idx_type == TableIndexType.TEXT.value:
            index_definition = TableTextIndexDefinition._from_dict(idx_def)
        elif idx_type == TableIndexType.UNKNOWN.value:
            index_definition = TableUnsupportedIndexDefinition._from_dict(idx_def)
        else:
            # not throwing here. Log a warning and try the inspection path
            logger.warning(
                f"Found an unexpected indexType when parsing a {cls.__name__} "
                f"dictionary: {idx_type}. Falling back to inspecting the "
                f"index definition."
            )
            index_definition = TableBaseIndexDefinition._from_dict(idx_def)
        return TableIndexDescriptor(
            name=raw_dict["name"],
            definition=index_definition,
        )

    @staticmethod
    def coerce(
        raw_input: TableIndexDescriptor | dict[str, Any],
    ) -> TableIndexDescriptor:
        """
        Normalize the input, whether an object already or a plain dictionary
        of the right structure, into a TableIndexDescriptor.
        """
        # Fix: declared as a staticmethod. It was previously a bare function
        # in the class body, which made instance-level calls
        # (`descriptor.coerce(...)`) fail while only class-level calls worked.
        if isinstance(raw_input, TableIndexDescriptor):
            return raw_input
        else:
            return TableIndexDescriptor._from_dict(raw_input)
The hierarchical arrangement of
TableIndexDescriptor, which contains aTableBaseIndexDefinition(plus possibly index options within the latter), is designed to mirror the shape of payloads and response about indexes in the Data API.Attributes
name- the name of the index. Index names are unique within a keyspace: hence, two tables in the same keyspace cannot use the same name for their indexes.
definition- an appropriate concrete subclass of
TableBaseIndexDefinitionproviding the detailed definition of the index. index_type- a value in the TableIndexType enum, describing the index type.
Instance variables
var definition : TableBaseIndexDefinition-
The type of the None singleton.
var index_type : TableIndexType-
The type of the None singleton.
var name : str-
The type of the None singleton.
Methods
def as_dict(self) ‑> dict[str, typing.Any]-
Expand source code
def as_dict(self) -> dict[str, Any]: """Recast this object into a dictionary.""" return { "name": self.name, "definition": self.definition.as_dict(), "indexType": self.index_type.value, }Recast this object into a dictionary.
def coerce(raw_input: TableIndexDescriptor | dict[str, Any]) ‑> TableIndexDescriptor-
Expand source code
def coerce( raw_input: TableIndexDescriptor | dict[str, Any], ) -> TableIndexDescriptor: """ Normalize the input, whether an object already or a plain dictionary of the right structure, into a TableIndexDescriptor. """ if isinstance(raw_input, TableIndexDescriptor): return raw_input else: return TableIndexDescriptor._from_dict(raw_input)Normalize the input, whether an object already or a plain dictionary of the right structure, into a TableIndexDescriptor.
class TableIndexOptions (ascii: bool | UnsetType = (unset),
normalize: bool | UnsetType = (unset),
case_sensitive: bool | UnsetType = (unset))-
Expand source code
@dataclass
class TableIndexOptions:
    """
    An object describing the options for a table regular (non-vector) index.

    Both when creating indexes and retrieving index metadata from the API,
    instances of TableIndexOptions are used to express the corresponding
    index settings.

    Attributes:
        ascii: whether the index should convert to US-ASCII before indexing.
            It can be passed only for indexes on a TEXT or ASCII column.
        normalize: whether the index should normalize Unicode and diacritics
            before indexing. It can be passed only for indexes on a TEXT
            or ASCII column.
        case_sensitive: whether the index should index the input in a
            case-sensitive manner. It can be passed only for indexes on
            a TEXT or ASCII column.
    """

    ascii: bool | UnsetType = _UNSET
    normalize: bool | UnsetType = _UNSET
    case_sensitive: bool | UnsetType = _UNSET

    def __repr__(self) -> str:
        # Bugfix: each piece now tests its *own* attribute for unset-ness.
        # Previously all three pieces tested `self.ascii`, so the rendering
        # of `normalize` and `case_sensitive` depended on whether `ascii`
        # happened to be set.
        not_null_pieces = [
            pc
            for pc in (
                None if isinstance(self.ascii, UnsetType) else f"ascii={self.ascii}",
                None
                if isinstance(self.normalize, UnsetType)
                else f"normalize={self.normalize}",
                None
                if isinstance(self.case_sensitive, UnsetType)
                else f"case_sensitive={self.case_sensitive}",
            )
            if pc is not None
        ]
        inner_desc = ", ".join(not_null_pieces)
        return f"{self.__class__.__name__}({inner_desc})"

    def as_dict(self) -> dict[str, Any]:
        """Recast this object into a dictionary."""
        # Unset attributes are omitted; explicit False values are kept
        # (the filter is `is not None`, not truthiness).
        return {
            k: v
            for k, v in {
                "ascii": None if isinstance(self.ascii, UnsetType) else self.ascii,
                "normalize": None
                if isinstance(self.normalize, UnsetType)
                else self.normalize,
                "caseSensitive": None
                if isinstance(self.case_sensitive, UnsetType)
                else self.case_sensitive,
            }.items()
            if v is not None
        }

    @classmethod
    def _from_dict(cls, raw_dict: dict[str, Any]) -> TableIndexOptions:
        """
        Create an instance of TableIndexOptions from a dictionary
        such as one from the Data API.
        """
        _warn_residual_keys(cls, raw_dict, {"ascii", "normalize", "caseSensitive"})
        return TableIndexOptions(
            ascii=raw_dict["ascii"] if raw_dict.get("ascii") is not None else _UNSET,
            normalize=raw_dict["normalize"]
            if raw_dict.get("normalize") is not None
            else _UNSET,
            case_sensitive=raw_dict["caseSensitive"]
            if raw_dict.get("caseSensitive") is not None
            else _UNSET,
        )

    @classmethod
    def coerce(cls, raw_input: TableIndexOptions | dict[str, Any]) -> TableIndexOptions:
        """
        Normalize the input, whether an object already or a plain dictionary
        of the right structure, into a TableIndexOptions.
        """
        if isinstance(raw_input, TableIndexOptions):
            return raw_input
        else:
            return cls._from_dict(raw_input)
Both when creating indexes and retrieving index metadata from the API, instances of TableIndexOptions are used to express the corresponding index settings.
Attributes
ascii- whether the index should convert to US-ASCII before indexing. It can be passed only for indexes on a TEXT or ASCII column.
normalize- whether the index should normalize Unicode and diacritics before indexing. It can be passed only for indexes on a TEXT or ASCII column.
case_sensitive- whether the index should index the input in a case-sensitive manner. It can be passed only for indexes on a TEXT or ASCII column.
Static methods
def coerce(raw_input: TableIndexOptions | dict[str, Any]) ‑> TableIndexOptions-
Normalize the input, whether an object already or a plain dictionary of the right structure, into a TableIndexOptions.
Instance variables
var ascii : bool | UnsetType-
Whether the index converts text to US-ASCII before indexing; unset when not specified.
var case_sensitive : bool | UnsetType-
The type of the None singleton.
var normalize : bool | UnsetType-
The type of the None singleton.
Methods
def as_dict(self) ‑> dict[str, typing.Any]-
Expand source code
def as_dict(self) -> dict[str, Any]: """Recast this object into a dictionary.""" return { k: v for k, v in { "ascii": None if isinstance(self.ascii, UnsetType) else self.ascii, "normalize": None if isinstance(self.normalize, UnsetType) else self.normalize, "caseSensitive": None if isinstance(self.case_sensitive, UnsetType) else self.case_sensitive, }.items() if v is not None }Recast this object into a dictionary.
class TableIndexType (*args, **kwds)-
Expand source code
class TableIndexType(StrEnum):
    """
    Enum to describe the index types for Table columns.
    """

    # Member values are the strings compared against the "indexType" field
    # of Data API payloads (see TableIndexDescriptor._from_dict).
    # NOTE(review): "UNKNOWN" is uppercase unlike the other values —
    # presumably matching the API's own spelling; confirm before changing.
    REGULAR = "regular"
    TEXT = "text"
    UNKNOWN = "UNKNOWN"
    VECTOR = "vector"
Ancestors
- StrEnum
- enum.Enum
Class variables
var REGULAR-
The type of the None singleton.
var TEXT-
The type of the None singleton.
var UNKNOWN-
The type of the None singleton.
var VECTOR-
The type of the None singleton.
Inherited members
class TableInfo (database_info: AstraDBDatabaseInfo,
keyspace: str,
name: str,
full_name: str)-
Expand source code
@dataclass
class TableInfo:
    """
    Represents the identifying information for a table,
    including the information about the database the table belongs to.

    Attributes:
        database_info: an AstraDBDatabaseInfo instance for the underlying database.
        keyspace: the keyspace where the table is located.
        name: table name. Unique within a keyspace (across tables/collections).
        full_name: identifier for the table within the database,
            in the form "keyspace.table_name".
    """

    # Plain data holder: the dataclass-generated __init__/__repr__/__eq__
    # are used as-is; no custom behavior is defined.
    database_info: AstraDBDatabaseInfo
    keyspace: str
    name: str
    full_name: str
Attributes
database_info- an AstraDBDatabaseInfo instance for the underlying database.
keyspace- the keyspace where the table is located.
name- table name. Unique within a keyspace (across tables/collections).
full_name- identifier for the table within the database, in the form "keyspace.table_name".
Instance variables
var database_info : AstraDBDatabaseInfo-
The type of the None singleton.
var full_name : str-
The type of the None singleton.
var keyspace : str-
The type of the None singleton.
var name : str-
The type of the None singleton.
class TableKeyValuedColumnType (*args, **kwds)-
Expand source code
class TableKeyValuedColumnType(StrEnum):
    """
    An enum to describe the types of column with "keys and values".
    """

    # "map" is the only key-valued column type defined here; the value is
    # the "type" string used in Data API column descriptions.
    MAP = "map"
Ancestors
- StrEnum
- enum.Enum
Class variables
var MAP-
The type of the None singleton.
Inherited members
class TableKeyValuedColumnTypeDescriptor (*,
value_type: str | dict[str, Any] | ColumnType | TableColumnTypeDescriptor,
key_type: str | dict[str, Any] | ColumnType | TableColumnTypeDescriptor,
column_type: str | TableKeyValuedColumnType = TableKeyValuedColumnType.MAP,
api_support: TableAPISupportDescriptor | None = None)-
Expand source code
@dataclass
class TableKeyValuedColumnTypeDescriptor(TableColumnTypeDescriptor):
    """
    Describes a 'key-value' table column: one holding an associative map
    (essentially a dict) between keys of a certain scalar type and values
    of a certain scalar type. The only such kind of column is a "map".

    Attributes:
        column_type: an instance of `TableKeyValuedColumnType`. When creating
            the object, this can be omitted as it only ever assumes the
            "MAP" value.
        key_type: the type of the individual keys in the map column. This is
            a `TableColumnTypeDescriptor`, but when creating the object,
            equivalent dictionaries, as well as strings such as "TEXT" or
            "UUID" or ColumnType entries, are also accepted. Using a column
            type not eligible to be a key will return a Data API error.
        value_type: the type of the individual items stored in the column.
            This is a `TableColumnTypeDescriptor`, but when creating the
            object, equivalent dictionaries, as well as strings such as
            "TEXT" or "UUID" or ColumnType entries, are also accepted.
        api_support: a `TableAPISupportDescriptor` object giving more details.
    """

    column_type: TableKeyValuedColumnType
    key_type: TableColumnTypeDescriptor
    value_type: TableColumnTypeDescriptor

    def __init__(
        self,
        *,
        value_type: str | dict[str, Any] | ColumnType | TableColumnTypeDescriptor,
        key_type: str | dict[str, Any] | ColumnType | TableColumnTypeDescriptor,
        column_type: str | TableKeyValuedColumnType = TableKeyValuedColumnType.MAP,
        api_support: TableAPISupportDescriptor | None = None,
    ) -> None:
        # Key/value types accept flexible inputs and are normalized here.
        self.key_type = TableColumnTypeDescriptor.coerce(key_type)
        self.value_type = TableColumnTypeDescriptor.coerce(value_type)
        super().__init__(
            column_type=TableKeyValuedColumnType.coerce(column_type),
            api_support=api_support,
        )

    def __repr__(self) -> str:
        generics_desc = f"<{self.key_type},{self.value_type}>"
        return f"{self.__class__.__name__}({self.column_type.value}{generics_desc})"

    def as_dict(self) -> dict[str, Any]:
        """Recast this object into a dictionary."""
        raw_items = {
            "type": self.column_type.value,
            "keyType": self.key_type.as_dict(),
            "valueType": self.value_type.as_dict(),
            "apiSupport": self.api_support.as_dict() if self.api_support else None,
        }
        return {key: val for key, val in raw_items.items() if val is not None}

    @override
    def as_spec(self) -> dict[str, Any]:
        # Same shape as `as_dict`, but key/value types use their spec form.
        raw_items = {
            "type": self.column_type.value,
            "keyType": self.key_type.as_spec(),
            "valueType": self.value_type.as_spec(),
            "apiSupport": self.api_support.as_dict() if self.api_support else None,
        }
        return {key: val for key, val in raw_items.items() if val is not None}

    @classmethod
    def _from_dict(cls, raw_dict: dict[str, Any]) -> TableKeyValuedColumnTypeDescriptor:
        """
        Create an instance of TableKeyValuedColumnTypeDescriptor from
        a dictionary such as one from the Data API.
        """
        _warn_residual_keys(
            cls, raw_dict, {"type", "keyType", "valueType", "apiSupport"}
        )
        api_support: TableAPISupportDescriptor | None = None
        if raw_dict.get("apiSupport"):
            api_support = TableAPISupportDescriptor._from_dict(raw_dict["apiSupport"])
        return TableKeyValuedColumnTypeDescriptor(
            column_type=raw_dict["type"],
            key_type=raw_dict["keyType"],
            value_type=raw_dict["valueType"],
            api_support=api_support,
        )
Attributes
column_type- an instance of
TableKeyValuedColumnType. When creating the object, this can be omitted as it only ever assumes the "MAP" value. key_type- the type of the individual keys in the map column.
This is a
TableColumnTypeDescriptor, but when creating the object, equivalent dictionaries, as well as strings such as "TEXT" or "UUID" or ColumnType entries, are also accepted. Using a column type not eligible to be a key will return a Data API error. value_type- the type of the individual items stored in the column.
This is a
TableColumnTypeDescriptor, but when creating the object, equivalent dictionaries, as well as strings such as "TEXT" or "UUID" or ColumnType entries, are also accepted. api_support- a
TableAPISupportDescriptorobject giving more details.
Ancestors
- TableColumnTypeDescriptor
- abc.ABC
Instance variables
var key_type : TableColumnTypeDescriptor-
The type of the None singleton.
var value_type : TableColumnTypeDescriptor-
The type of the None singleton.
Methods
def as_dict(self) ‑> dict[str, typing.Any]-
Expand source code
def as_dict(self) -> dict[str, Any]: """Recast this object into a dictionary.""" return { k: v for k, v in { "type": self.column_type.value, "keyType": self.key_type.as_dict(), "valueType": self.value_type.as_dict(), "apiSupport": self.api_support.as_dict() if self.api_support else None, }.items() if v is not None }Recast this object into a dictionary.
Inherited members
class TablePassthroughColumnTypeDescriptor (*,
column_type: str | TablePassthroughColumnType = TablePassthroughColumnType.PASSTHROUGH,
raw_descriptor: dict[str, Any],
api_support: TableAPISupportDescriptor | None = None)-
Expand source code
@dataclass
class TablePassthroughColumnTypeDescriptor(TableColumnTypeDescriptor):
    """
    Describes a table column which, for lack of information or understanding
    from the client, needs to be conveyed to the caller as-is. This is used
    during certain (schema- or data-) read operations.

    This class is not meant for direct instantiation by the client user.
    Note that the `column_type` attribute is *not* mapped to the "type" key
    in the corresponding dictionary form.

    Attributes:
        column_type: a `TablePassthroughColumnType` value. This can be omitted
            when creating the object. It only ever assumes the "PASSTHROUGH"
            value.
        raw_descriptor: a free-form dictionary expressing the complete column
            description as it comes from the Data API. Part of this information
            is available in `api_support`, if such is provided.
        api_support: a `TableAPISupportDescriptor` object giving more details.
    """

    column_type: TablePassthroughColumnType
    raw_descriptor: dict[str, Any]
    api_support: TableAPISupportDescriptor

    def __init__(
        self,
        *,
        column_type: str
        | TablePassthroughColumnType = TablePassthroughColumnType.PASSTHROUGH,
        raw_descriptor: dict[str, Any],
        api_support: TableAPISupportDescriptor | None = None,
    ) -> None:
        # The verbatim API payload is kept around untouched.
        self.raw_descriptor = raw_descriptor
        super().__init__(
            column_type=TablePassthroughColumnType.coerce(column_type),
            api_support=api_support,
        )

    def __repr__(self) -> str:
        payload_desc = str(self.as_dict())
        return f"{self.__class__.__name__}({payload_desc})"

    def as_dict(self) -> dict[str, Any]:
        """Recast this object into a dictionary."""
        # By construction, the dictionary form is the original raw payload.
        return self.raw_descriptor

    @classmethod
    def _from_dict(
        cls, raw_dict: dict[str, Any]
    ) -> TablePassthroughColumnTypeDescriptor:
        """
        Create an instance of TablePassthroughColumnTypeDescriptor from
        a dictionary such as one from the Data API.
        """
        support_block = raw_dict.get("apiSupport")
        return TablePassthroughColumnTypeDescriptor(
            raw_descriptor=raw_dict,
            api_support=TableAPISupportDescriptor._from_dict(support_block)
            if support_block
            else None,
        )
This class is not meant for direct instantiation by the client user. Note that the
column_typeattribute is not mapped to the "type" key in the corresponding dictionary form.Attributes
column_type- a
TablePassthroughColumnTypevalue. This can be omitted when creating the object. It only ever assumes the "PASSTHROUGH" value. raw_descriptor- a free-form dictionary expressing the complete column
description as it comes from the Data API. Part of this information
is available in
api_support, if such is provided. api_support- a
TableAPISupportDescriptorobject giving more details.
Ancestors
- TableColumnTypeDescriptor
- abc.ABC
Instance variables
var raw_descriptor : dict[str, typing.Any]-
The type of the None singleton.
Methods
def as_dict(self) ‑> dict[str, typing.Any]-
Expand source code
def as_dict(self) -> dict[str, Any]: """Recast this object into a dictionary.""" return self.raw_descriptorRecast this object into a dictionary.
Inherited members
class TablePrimaryKeyDescriptor (partition_by: list[str], partition_sort: dict[str, int])-
Expand source code
@dataclass
class TablePrimaryKeyDescriptor:
    """
    The portion of a table definition describing the primary key.

    See the docstring for class `CreateTableDefinition` for in-context
    usage examples.

    Attributes:
        partition_by: a list of column names forming the partition key, i.e.
            the portion of primary key that determines physical grouping and
            storage of rows on the database. Rows with the same values for the
            partition_by columns are guaranteed to be stored next to each
            other. This list cannot be empty.
        partition_sort: this defines how rows are to be sorted within a
            partition. It is a dictionary that specifies, for each column of
            the primary key not in the `partition_by` field, whether the
            sorting is ascending or descending (see the values in the
            `SortMode` constant). The sorting within a partition considers all
            columns in this dictionary, in a hierarchical way: hence, ordering
            in this dictionary is relevant.
    """

    partition_by: list[str]
    partition_sort: dict[str, int]

    def __repr__(self) -> str:
        partition_part = ",".join(self.partition_by)
        # Render each clustering column as "name:a" (ascending) or "name:d".
        sort_parts = []
        for col_name, col_sort in self.partition_sort.items():
            direction = "a" if col_sort > 0 else "d"
            sort_parts.append(f"{col_name}:{direction}")
        full_pk = f"({partition_part})" + ",".join(sort_parts)
        return f"{self.__class__.__name__}[{full_pk}]"

    def as_dict(self) -> dict[str, Any]:
        """Recast this object into a dictionary."""
        candidate = {
            "partitionBy": self.partition_by,
            "partitionSort": dict(self.partition_sort.items()),
        }
        return {key: val for key, val in candidate.items() if val is not None}

    @classmethod
    def _from_dict(cls, raw_dict: dict[str, Any]) -> TablePrimaryKeyDescriptor:
        """
        Create an instance of TablePrimaryKeyDescriptor from a dictionary
        such as one from the Data API.
        """
        _warn_residual_keys(cls, raw_dict, {"partitionBy", "partitionSort"})
        return TablePrimaryKeyDescriptor(
            partition_by=raw_dict["partitionBy"],
            partition_sort=raw_dict["partitionSort"],
        )

    @classmethod
    def coerce(
        cls, raw_input: TablePrimaryKeyDescriptor | dict[str, Any] | str
    ) -> TablePrimaryKeyDescriptor:
        """
        Normalize the input, whether an object already or a plain dictionary
        of the right structure, into a TablePrimaryKeyDescriptor.
        """
        if isinstance(raw_input, TablePrimaryKeyDescriptor):
            return raw_input
        if isinstance(raw_input, str):
            # A bare string means "partition by this one column, no sorting".
            return cls._from_dict({"partitionBy": [raw_input], "partitionSort": {}})
        return cls._from_dict(raw_input)
See the docstring for class
CreateTableDefinitionfor in-context usage examples.Attributes
partition_by- a list of column names forming the partition key, i.e. the portion of primary key that determines physical grouping and storage of rows on the database. Rows with the same values for the partition_by columns are guaranteed to be stored next to each other. This list cannot be empty.
partition_sort- this defines how rows are to be sorted within a partition.
It is a dictionary that specifies, for each column of the primary key
not in the
partition_byfield, whether the sorting is ascending or descending (see the values in theSortModeconstant). The sorting within a partition considers all columns in this dictionary, in a hierarchical way: hence, ordering in this dictionary is relevant.
Static methods
def coerce(raw_input: TablePrimaryKeyDescriptor | dict[str, Any] | str) ‑> TablePrimaryKeyDescriptor-
Normalize the input, whether an object already or a plain dictionary of the right structure, into a TablePrimaryKeyDescriptor.
Instance variables
var partition_by : list[str]-
The type of the None singleton.
var partition_sort : dict[str, int]-
The type of the None singleton.
Methods
def as_dict(self) ‑> dict[str, typing.Any]-
Expand source code
def as_dict(self) -> dict[str, Any]: """Recast this object into a dictionary.""" return { k: v for k, v in { "partitionBy": self.partition_by, "partitionSort": dict(self.partition_sort.items()), }.items() if v is not None }Recast this object into a dictionary.
class TableScalarColumnTypeDescriptor (column_type: str | ColumnType,
api_support: TableAPISupportDescriptor | None = None)-
Expand source code
@dataclass
class TableScalarColumnTypeDescriptor(TableColumnTypeDescriptor):
    """
    Describes a table column of scalar type, i.e. one containing a single
    simple value.

    See the docstring for class `CreateTableDefinition` for in-context
    usage examples.

    Attributes:
        column_type: a `ColumnType` value. When creating the object,
            simple strings such as "TEXT" or "UUID" are also accepted.
        api_support: a `TableAPISupportDescriptor` object giving more details.
    """

    column_type: ColumnType

    def __init__(
        self,
        column_type: str | ColumnType,
        api_support: TableAPISupportDescriptor | None = None,
    ) -> None:
        super().__init__(
            column_type=ColumnType.coerce(column_type),
            api_support=api_support,
        )

    def __repr__(self) -> str:
        return f"{self.__class__.__name__}({self.column_type.value})"

    def as_dict(self) -> dict[str, Any]:
        """Recast this object into a dictionary."""
        result: dict[str, Any] = {"type": self.column_type.value}
        if self.api_support:
            result["apiSupport"] = self.api_support.as_dict()
        return result

    @override
    def as_spec(self) -> dict[str, Any] | str:
        # Without apiSupport details, the spec collapses to the bare
        # type string; otherwise the full dictionary form is used.
        if self.api_support is None:
            return self.column_type.value
        return self.as_dict()

    @classmethod
    def _from_dict(cls, raw_dict: dict[str, Any]) -> TableScalarColumnTypeDescriptor:
        """
        Create an instance of TableScalarColumnTypeDescriptor from
        a dictionary such as one from the Data API.
        """
        _warn_residual_keys(cls, raw_dict, {"type", "apiSupport"})
        support_block = raw_dict.get("apiSupport")
        return TableScalarColumnTypeDescriptor(
            column_type=raw_dict["type"],
            api_support=TableAPISupportDescriptor._from_dict(support_block)
            if support_block
            else None,
        )
See the docstring for class
CreateTableDefinitionfor in-context usage examples.Attributes
column_type- a
ColumnTypevalue. When creating the object, simple strings such as "TEXT" or "UUID" are also accepted. api_support- a
TableAPISupportDescriptorobject giving more details.
Ancestors
- TableColumnTypeDescriptor
- abc.ABC
Methods
def as_dict(self) ‑> dict[str, typing.Any]-
Expand source code
def as_dict(self) -> dict[str, Any]: """Recast this object into a dictionary.""" return { k: v for k, v in { "type": self.column_type.value, "apiSupport": self.api_support.as_dict() if self.api_support else None, }.items() if v is not None }Recast this object into a dictionary.
Inherited members
class TableTextIndexDefinition (column: str,
options: TableTextIndexOptions)-
Expand source code
@dataclass
class TableTextIndexDefinition(TableBaseIndexDefinition):
    """
    An object describing a text index definition,
    including the name of the indexed column and the index options.

    Attributes:
        column: the name of the indexed column.
        options: a `TableTextIndexOptions` detailing the index configuration.
    """

    column: str
    options: TableTextIndexOptions

    def __init__(
        self,
        column: str,
        options: TableTextIndexOptions,
    ) -> None:
        self._index_type = TableIndexType.TEXT
        self.column = column
        self.options = options

    def __repr__(self) -> str:
        return f"{self.__class__.__name__}({self.column}, options={self.options})"

    def as_dict(self) -> dict[str, Any]:
        """Recast this object into a dictionary."""
        # Falsy entries (e.g. empty options dictionaries) are omitted.
        return {
            k: v
            for k, v in {
                "column": self.column,
                "options": self.options.as_dict(),
            }.items()
            if v
        }

    @classmethod
    def _from_dict(
        cls,
        raw_dict: dict[str, Any],
    ) -> TableTextIndexDefinition:
        """
        Create an instance of TableTextIndexDefinition from a dictionary
        such as one from the Data API.
        """
        _warn_residual_keys(cls, raw_dict, {"column", "options"})
        return TableTextIndexDefinition(
            column=raw_dict["column"],
            options=TableTextIndexOptions.coerce(raw_dict.get("options") or {}),
        )

    @classmethod
    def coerce(
        cls,
        raw_input: TableTextIndexDefinition | dict[str, Any],
    ) -> TableTextIndexDefinition:
        """
        Normalize the input, whether an object already or a plain dictionary
        of the right structure, into a TableTextIndexDefinition.
        """
        # (Docstring fixed: it previously said "TableVectorIndexDefinition",
        # a copy-paste leftover from the vector-index sibling class.)
        if isinstance(raw_input, TableTextIndexDefinition):
            return raw_input
        else:
            _filled_raw_input = {**{"options": {}}, **raw_input}
            return cls._from_dict(_filled_raw_input)
Attributes
column- the name of the indexed column.
options- a
TableTextIndexOptionsdetailing the index configuration.
Ancestors
- TableBaseIndexDefinition
- abc.ABC
Static methods
def coerce(raw_input: TableTextIndexDefinition | dict[str, Any]) ‑> TableTextIndexDefinition-
Normalize the input, whether an object already or a plain dictionary of the right structure, into a TableTextIndexDefinition.
Instance variables
var options : TableTextIndexOptions-
The type of the None singleton.
Methods
def as_dict(self) ‑> dict[str, typing.Any]-
Expand source code
def as_dict(self) -> dict[str, Any]: """Recast this object into a dictionary.""" return { k: v for k, v in { "column": self.column, "options": self.options.as_dict(), }.items() if v }Recast this object into a dictionary.
Inherited members
class TableTextIndexOptions (analyzer: str | dict[str, Any] | UnsetType = (unset))-
Expand source code
@dataclass
class TableTextIndexOptions:
    """
    An object describing the options for a table text index, which is the
    index that enables lexicographical matching on a text column.

    Both when creating indexes and retrieving index metadata from the API,
    instances of TableTextIndexOptions are used to express the corresponding
    index settings.

    Attributes:
        analyzer: A string describing a built-in analyzer, or a dictionary
            describing an analyzer configuration in full.
    """

    analyzer: str | dict[str, Any] | UnsetType = _UNSET

    def __repr__(self) -> str:
        if isinstance(self.analyzer, UnsetType):
            return f"{self.__class__.__name__}()"
        else:
            # Dict-shaped analyzer configurations are truncated for brevity.
            analyzer_desc = (
                self.analyzer
                if isinstance(self.analyzer, str)
                else f"{str(self.analyzer)[:25]}..."
            )
            # Fix: label the piece with the attribute's actual name
            # ("analyzer"); it previously leaked the local variable
            # name as 'analyzer_desc="..."'.
            return f'{self.__class__.__name__}(analyzer="{analyzer_desc}")'

    def as_dict(self) -> dict[str, Any]:
        """Recast this object into a dictionary."""
        return {
            k: v
            for k, v in {
                "analyzer": None
                if isinstance(self.analyzer, UnsetType)
                else self.analyzer,
            }.items()
            if v is not None
        }

    @classmethod
    def _from_dict(cls, raw_dict: dict[str, Any]) -> TableTextIndexOptions:
        """
        Create an instance of TableTextIndexOptions from a dictionary
        such as one from the Data API.
        """
        _warn_residual_keys(cls, raw_dict, {"analyzer"})
        return TableTextIndexOptions(
            analyzer=raw_dict["analyzer"]
            if raw_dict.get("analyzer") is not None
            else _UNSET,
        )

    @classmethod
    def coerce(
        cls, raw_input: TableTextIndexOptions | dict[str, Any] | None
    ) -> TableTextIndexOptions:
        """
        Normalize the input, whether an object already or a plain dictionary
        of the right structure, into a TableTextIndexOptions.
        """
        if isinstance(raw_input, TableTextIndexOptions):
            return raw_input
        elif raw_input is None:
            # A missing options payload means "no analyzer specified".
            return cls(analyzer=_UNSET)
        else:
            return cls._from_dict(raw_input)
Both when creating indexes and retrieving index metadata from the API, instances of TableTextIndexOptions are used to express the corresponding index settings.
Attributes
analyzer- A string describing a built-in analyzer, or a dictionary describing an analyzer configuration in full.
Static methods
def coerce(raw_input: TableTextIndexOptions | dict[str, Any] | None) ‑> TableTextIndexOptions-
Normalize the input, whether an object already or a plain dictionary of the right structure, into a TableTextIndexOptions.
Instance variables
var analyzer : str | dict[str, typing.Any] | UnsetType-
The type of the None singleton.
Methods
def as_dict(self) ‑> dict[str, typing.Any]-
Expand source code
def as_dict(self) -> dict[str, Any]: """Recast this object into a dictionary.""" return { k: v for k, v in { "analyzer": None if isinstance(self.analyzer, UnsetType) else self.analyzer, }.items() if v is not None }Recast this object into a dictionary.
class TableUDTColumnDescriptor (*,
udt_name: str,
column_type: str | TableUDTColumnType = TableUDTColumnType.USERDEFINED,
api_support: TableAPISupportDescriptor | None = None,
definition: CreateTypeDefinition | None = None)-
Expand source code
@dataclass
class TableUDTColumnDescriptor(TableColumnTypeDescriptor):
    """
    Represents and describes a column in a Table, of a user-defined type (UDT)
    type, i.e. a previously-defined set of named fields, each with its type.

    See the docstring for class `CreateTableDefinition` for in-context
    usage examples.

    Attributes:
        column_type: a `TableUDTColumnType` value. This can be omitted when
            creating the object; it only ever assumes the "USERDEFINED" value.
        udt_name: the name of the user-defined type for this column.
        definition: a full type definition in the form of an
            `astrapy.info.CreateTypeDefinition` object. This attribute is
            optional and, in practice, only present in the context of data
            reads, to provide a complete schema for the data returned from
            the Data API within the 'projectionSchema' out-of-band
            information coming with the read.
        api_support: a `TableAPISupportDescriptor` object giving more details.
    """

    column_type: TableUDTColumnType
    definition: CreateTypeDefinition | None
    udt_name: str

    def __init__(
        self,
        *,
        udt_name: str,
        column_type: str | TableUDTColumnType = TableUDTColumnType.USERDEFINED,
        api_support: TableAPISupportDescriptor | None = None,
        definition: CreateTypeDefinition | None = None,
    ) -> None:
        # lazy-import here to avoid circular import issues
        from astrapy.data.info.table_descriptor.type_creation import (
            CreateTypeDefinition,
        )

        self.udt_name = udt_name
        if definition is None:
            self.definition = None
        else:
            self.definition = CreateTypeDefinition.coerce(definition)
        super().__init__(
            column_type=TableUDTColumnType.coerce(column_type),
            api_support=api_support,
        )

    def __repr__(self) -> str:
        return f"{self.__class__.__name__}({self.udt_name})"

    def as_dict(self) -> dict[str, Any]:
        """Recast this object into a dictionary."""
        full_map = {
            "type": self.column_type.value,
            "udtName": self.udt_name,
            "definition": self.definition.as_dict() if self.definition else None,
            "apiSupport": self.api_support.as_dict() if self.api_support else None,
        }
        return {key: value for key, value in full_map.items() if value is not None}

    @classmethod
    def _from_dict(cls, raw_dict: dict[str, Any]) -> TableUDTColumnDescriptor:
        """
        Create an instance of TableUDTColumnDescriptor from a dictionary
        such as one from the Data API.
        """
        # lazy-import here to avoid circular import issues
        from astrapy.data.info.table_descriptor.type_creation import (
            CreateTypeDefinition,
        )

        _warn_residual_keys(
            cls,
            raw_dict,
            {"type", "udtName", "apiSupport", "definition"},
        )
        raw_definition = raw_dict.get("definition")
        raw_api_support = raw_dict.get("apiSupport")
        return TableUDTColumnDescriptor(
            column_type=raw_dict["type"],
            udt_name=raw_dict["udtName"],
            definition=CreateTypeDefinition._from_dict(raw_definition)
            if raw_definition
            else None,
            api_support=TableAPISupportDescriptor._from_dict(raw_api_support)
            if raw_api_support
            else None,
        )
See the docstring for class
CreateTableDefinitionfor in-context usage examples.Attributes
column_type- a
TableUDTColumnTypevalue. This can be omitted when creating the object. It only ever assumes the "USERDEFINED" value. udt_name- the name of the user-defined type for this column.
definition- a full type definition in the form of an object of type
CreateTypeDefinitionobject. This attribute is optional, and as a matter of fact is only present in the context of data reads, to provide a complete schema for the data returned from the Data API within the 'projectionSchema' out-of-band information coming with the read. api_support- a
TableAPISupportDescriptorobject giving more details.
Ancestors
- TableColumnTypeDescriptor
- abc.ABC
Instance variables
var definition : CreateTypeDefinition | None-
The type of the None singleton.
var udt_name : str-
The type of the None singleton.
Methods
def as_dict(self) ‑> dict[str, typing.Any]-
Expand source code
def as_dict(self) -> dict[str, Any]: """Recast this object into a dictionary.""" return { k: v for k, v in { "type": self.column_type.value, "udtName": self.udt_name, "definition": self.definition.as_dict() if self.definition else None, "apiSupport": self.api_support.as_dict() if self.api_support else None, }.items() if v is not None }Recast this object into a dictionary.
Inherited members
class TableUnsupportedColumnTypeDescriptor (*,
column_type: TableUnsupportedColumnType | str,
api_support: TableAPISupportDescriptor)-
Expand source code
@dataclass
class TableUnsupportedColumnTypeDescriptor(TableColumnTypeDescriptor):
    """
    Represents and describes a column in a Table, of unsupported type.

    Note that this column type descriptor cannot be used in table creation,
    rather it can only be returned when listing the tables or getting their
    metadata by the API.

    Attributes:
        column_type: an instance of `TableUnsupportedColumnType`.
        api_support: a `TableAPISupportDescriptor` object giving more details.

    This class has no `coerce` method, since it is always only found in
    API responses.
    """

    column_type: TableUnsupportedColumnType
    api_support: TableAPISupportDescriptor

    def __init__(
        self,
        *,
        column_type: TableUnsupportedColumnType | str,
        api_support: TableAPISupportDescriptor,
    ) -> None:
        super().__init__(
            column_type=TableUnsupportedColumnType.coerce(column_type),
            api_support=api_support,
        )

    def __repr__(self) -> str:
        return f"{self.__class__.__name__}({self.api_support.cql_definition})"

    def as_dict(self) -> dict[str, Any]:
        """Recast this object into a dictionary."""
        full_map = {
            "type": self.column_type.value,
            "apiSupport": self.api_support.as_dict(),
        }
        return {key: value for key, value in full_map.items() if value is not None}

    @classmethod
    def _from_dict(
        cls, raw_dict: dict[str, Any]
    ) -> TableUnsupportedColumnTypeDescriptor:
        """
        Create an instance of TableUnsupportedColumnTypeDescriptor from a
        dictionary such as one from the Data API.
        """
        _warn_residual_keys(cls, raw_dict, {"type", "apiSupport"})
        return TableUnsupportedColumnTypeDescriptor(
            column_type=raw_dict["type"],
            api_support=TableAPISupportDescriptor._from_dict(raw_dict["apiSupport"]),
        )
Note that this column type descriptor cannot be used in table creation, rather it can only be returned when listing the tables or getting their metadata by the API.
Attributes
column_type- an instance of
TableUnsupportedColumnType. api_support- a
TableAPISupportDescriptorobject giving more details.
This class has no
coercemethod, since it is always only found in API responses.Ancestors
- TableColumnTypeDescriptor
- abc.ABC
Methods
def as_dict(self) ‑> dict[str, typing.Any]-
Expand source code
def as_dict(self) -> dict[str, Any]: """Recast this object into a dictionary.""" return { k: v for k, v in { "type": self.column_type.value, "apiSupport": self.api_support.as_dict(), }.items() if v is not None }Recast this object into a dictionary.
Inherited members
class TableUnsupportedIndexDefinition (column: str,
api_support: TableAPIIndexSupportDescriptor)-
Expand source code
@dataclass
class TableUnsupportedIndexDefinition(TableBaseIndexDefinition):
    """
    An object describing the definition of an unsupported index found on a
    table, including the name of the indexed column and the index support
    status.

    Attributes:
        column: the name of the indexed column.
        api_support: a `TableAPIIndexSupportDescriptor` detailing the level
            of support for the index by the Data API.
    """

    column: str
    api_support: TableAPIIndexSupportDescriptor

    def __init__(
        self,
        column: str,
        api_support: TableAPIIndexSupportDescriptor,
    ) -> None:
        self._index_type = TableIndexType.UNKNOWN
        self.column = column
        self.api_support = api_support

    def __repr__(self) -> str:
        return f"{self.__class__.__name__}({self.api_support.cql_definition})"

    def as_dict(self) -> dict[str, Any]:
        """Recast this object into a dictionary."""
        return {
            "column": self.column,
            "apiSupport": self.api_support.as_dict(),
        }

    @classmethod
    def _from_dict(
        cls,
        raw_dict: dict[str, Any],
    ) -> TableUnsupportedIndexDefinition:
        """
        Create an instance of TableUnsupportedIndexDefinition from a
        dictionary such as one from the Data API.
        """
        _warn_residual_keys(cls, raw_dict, {"column", "apiSupport"})
        parsed_support = TableAPIIndexSupportDescriptor._from_dict(
            raw_dict["apiSupport"]
        )
        return TableUnsupportedIndexDefinition(
            column=raw_dict["column"],
            api_support=parsed_support,
        )

    @classmethod
    def coerce(
        cls,
        raw_input: TableUnsupportedIndexDefinition | dict[str, Any],
    ) -> TableUnsupportedIndexDefinition:
        """
        Normalize the input, whether an object already or a plain dictionary
        of the right structure, into a TableUnsupportedIndexDefinition.
        """
        if isinstance(raw_input, TableUnsupportedIndexDefinition):
            return raw_input
        return cls._from_dict(raw_input)
Attributes
column- the name of the indexed column.
api_support- a
TableAPIIndexSupportDescriptordetailing the level of support for the index by the Data API.
Ancestors
- TableBaseIndexDefinition
- abc.ABC
Static methods
def coerce(raw_input: TableUnsupportedIndexDefinition | dict[str, Any]) ‑> TableUnsupportedIndexDefinition-
Normalize the input, whether an object already or a plain dictionary of the right structure, into a TableUnsupportedIndexDefinition.
Instance variables
var api_support : TableAPIIndexSupportDescriptor-
A TableAPIIndexSupportDescriptor detailing the level of support for the index by the Data API.
Methods
def as_dict(self) ‑> dict[str, typing.Any]-
Expand source code
def as_dict(self) -> dict[str, Any]: """Recast this object into a dictionary.""" return { "column": self.column, "apiSupport": self.api_support.as_dict(), }Recast this object into a dictionary.
Inherited members
class TableValuedColumnType (*args, **kwds)-
Expand source code
class TableValuedColumnType(StrEnum):
    """
    An enum to describe the types of column with "values".
    """

    LIST = "list"
    SET = "set"
Ancestors
- StrEnum
- enum.Enum
Class variables
var LIST-
The type of the None singleton.
var SET-
The type of the None singleton.
Inherited members
class TableValuedColumnTypeDescriptor (*,
column_type: str | TableValuedColumnType,
value_type: str | dict[Any, str] | ColumnType | TableColumnTypeDescriptor,
api_support: TableAPISupportDescriptor | None = None)-
Expand source code
@dataclass
class TableValuedColumnTypeDescriptor(TableColumnTypeDescriptor):
    """
    Represents and describes a column in a Table, of a 'valued' type that
    stores multiple values. This means either a list or a set of homogeneous
    items.

    See the docstring for class `CreateTableDefinition` for in-context
    usage examples.

    Attributes:
        column_type: an instance of `TableValuedColumnType`. When creating
            the object, simple strings such as "list" or "set" are also
            accepted.
        value_type: the type of the individual items stored in the column.
            This is a `TableColumnTypeDescriptor`, but when creating the
            object, equivalent dictionaries, as well as strings such as
            "TEXT" or "UUID" or ColumnType entries, are also accepted.
        api_support: a `TableAPISupportDescriptor` object giving more details.
    """

    column_type: TableValuedColumnType
    value_type: TableColumnTypeDescriptor

    def __init__(
        self,
        *,
        column_type: str | TableValuedColumnType,
        value_type: str | dict[Any, str] | ColumnType | TableColumnTypeDescriptor,
        api_support: TableAPISupportDescriptor | None = None,
    ) -> None:
        self.value_type = TableColumnTypeDescriptor.coerce(value_type)
        super().__init__(
            column_type=TableValuedColumnType.coerce(column_type),
            api_support=api_support,
        )

    def __repr__(self) -> str:
        return (
            f"{self.__class__.__name__}"
            f"({self.column_type.value}<{self.value_type}>)"
        )

    def as_dict(self) -> dict[str, Any]:
        """Recast this object into a dictionary."""
        full_map = {
            "type": self.column_type.value,
            "valueType": self.value_type.as_dict(),
            "apiSupport": self.api_support.as_dict() if self.api_support else None,
        }
        return {key: value for key, value in full_map.items() if value is not None}

    @override
    def as_spec(self) -> dict[str, Any] | str:
        # Same shape as as_dict, except the value type is rendered in its
        # compact "spec" form.
        full_map = {
            "type": self.column_type.value,
            "valueType": self.value_type.as_spec(),
            "apiSupport": self.api_support.as_dict() if self.api_support else None,
        }
        return {key: value for key, value in full_map.items() if value is not None}

    @classmethod
    def _from_dict(cls, raw_dict: dict[str, Any]) -> TableValuedColumnTypeDescriptor:
        """
        Create an instance of TableValuedColumnTypeDescriptor from a
        dictionary such as one from the Data API.
        """
        _warn_residual_keys(cls, raw_dict, {"type", "valueType", "apiSupport"})
        raw_api_support = raw_dict.get("apiSupport")
        return TableValuedColumnTypeDescriptor(
            column_type=raw_dict["type"],
            value_type=raw_dict["valueType"],
            api_support=TableAPISupportDescriptor._from_dict(raw_api_support)
            if raw_api_support
            else None,
        )
See the docstring for class
CreateTableDefinitionfor in-context usage examples.Attributes
column_type- an instance of
TableValuedColumnType. When creating the object, simple strings such as "list" or "set" are also accepted. value_type- the type of the individual items stored in the column.
This is a
TableColumnTypeDescriptor, but when creating the object, equivalent dictionaries, as well as strings such as "TEXT" or "UUID" or ColumnType entries, are also accepted. api_support- a
TableAPISupportDescriptorobject giving more details.
Ancestors
- TableColumnTypeDescriptor
- abc.ABC
Instance variables
var value_type : TableColumnTypeDescriptor-
The type of the None singleton.
Methods
def as_dict(self) ‑> dict[str, typing.Any]-
Expand source code
def as_dict(self) -> dict[str, Any]: """Recast this object into a dictionary.""" return { k: v for k, v in { "type": self.column_type.value, "valueType": self.value_type.as_dict(), "apiSupport": self.api_support.as_dict() if self.api_support else None, }.items() if v is not None }Recast this object into a dictionary.
Inherited members
class TableVectorColumnTypeDescriptor (*,
column_type: str | TableVectorColumnType = TableVectorColumnType.VECTOR,
dimension: int | None,
service: VectorServiceOptions | None = None,
api_support: TableAPISupportDescriptor | None = None)-
Expand source code
@dataclass
class TableVectorColumnTypeDescriptor(TableColumnTypeDescriptor):
    """
    Represents and describes a column in a Table, of vector type, i.e. which
    contains a list of `dimension` floats that is treated specially as a
    "vector".

    See the docstring for class `CreateTableDefinition` for in-context
    usage examples.

    Attributes:
        column_type: a `TableVectorColumnType` value. This can be omitted
            when creating the object; it only ever assumes the "VECTOR" value.
        dimension: an integer, the number of components (numbers) in the
            vectors. This can be left unspecified in some cases of
            vectorize-enabled columns.
        service: an optional `VectorServiceOptions` object defining the
            vectorize settings (i.e. server-side embedding computation)
            for the column.
        api_support: a `TableAPISupportDescriptor` object giving more details.
    """

    column_type: TableVectorColumnType
    dimension: int | None
    service: VectorServiceOptions | None

    def __init__(
        self,
        *,
        column_type: str | TableVectorColumnType = TableVectorColumnType.VECTOR,
        dimension: int | None,
        service: VectorServiceOptions | None = None,
        api_support: TableAPISupportDescriptor | None = None,
    ) -> None:
        self.dimension = dimension
        self.service = service
        super().__init__(
            column_type=TableVectorColumnType.coerce(column_type),
            api_support=api_support,
        )

    def __repr__(self) -> str:
        pieces: list[str] = []
        if self.dimension is not None:
            pieces.append(f"dimension={self.dimension}")
        if self.service is not None:
            pieces.append(f"service={self.service}")
        inner_desc = ", ".join(pieces)
        return f"{self.__class__.__name__}({self.column_type.value}[{inner_desc}])"

    def as_dict(self) -> dict[str, Any]:
        """Recast this object into a dictionary."""
        full_map = {
            "type": self.column_type.value,
            "dimension": self.dimension,
            "service": None if self.service is None else self.service.as_dict(),
            "apiSupport": self.api_support.as_dict() if self.api_support else None,
        }
        return {key: value for key, value in full_map.items() if value is not None}

    @classmethod
    def _from_dict(cls, raw_dict: dict[str, Any]) -> TableVectorColumnTypeDescriptor:
        """
        Create an instance of TableVectorColumnTypeDescriptor from a
        dictionary such as one from the Data API.
        """
        _warn_residual_keys(
            cls,
            raw_dict,
            {"type", "dimension", "service", "apiSupport"},
        )
        raw_api_support = raw_dict.get("apiSupport")
        return TableVectorColumnTypeDescriptor(
            column_type=raw_dict["type"],
            dimension=raw_dict.get("dimension"),
            service=VectorServiceOptions.coerce(raw_dict.get("service")),
            api_support=TableAPISupportDescriptor._from_dict(raw_api_support)
            if raw_api_support
            else None,
        )
dimensionfloats that is treated specially as a "vector".See the docstring for class
CreateTableDefinitionfor in-context usage examples.Attributes
column_type- a
TableVectorColumnTypevalue. This can be omitted when creating the object. It only ever assumes the "VECTOR" value. dimension- an integer, the number of components (numbers) in the vectors. This can be left unspecified in some cases of vectorize-enabled columns.
service- an optional
VectorServiceOptionsobject defining the vectorize settings (i.e. server-side embedding computation) for the column. api_support- a
TableAPISupportDescriptorobject giving more details.
Ancestors
- TableColumnTypeDescriptor
- abc.ABC
Instance variables
var dimension : int | None-
The type of the None singleton.
var service : VectorServiceOptions | None-
The type of the None singleton.
Methods
def as_dict(self) ‑> dict[str, typing.Any]-
Expand source code
def as_dict(self) -> dict[str, Any]: """Recast this object into a dictionary.""" return { k: v for k, v in { "type": self.column_type.value, "dimension": self.dimension, "service": None if self.service is None else self.service.as_dict(), "apiSupport": self.api_support.as_dict() if self.api_support else None, }.items() if v is not None }Recast this object into a dictionary.
Inherited members
class TableVectorIndexDefinition (column: str,
options: TableVectorIndexOptions)-
Expand source code
@dataclass
class TableVectorIndexDefinition(TableBaseIndexDefinition):
    """
    An object describing a vector index definition, including the name of
    the indexed column and the index options.

    Attributes:
        column: the name of the indexed column.
        options: a `TableVectorIndexOptions` detailing the index configuration.
    """

    column: str
    options: TableVectorIndexOptions

    def __init__(
        self,
        column: str,
        options: TableVectorIndexOptions,
    ) -> None:
        self._index_type = TableIndexType.VECTOR
        self.column = column
        self.options = options

    def __repr__(self) -> str:
        return f"{self.__class__.__name__}({self.column}, options={self.options})"

    def as_dict(self) -> dict[str, Any]:
        """Recast this object into a dictionary."""
        # NOTE(review): this filters on truthiness (so e.g. an empty options
        # dict is dropped), unlike sibling classes that filter on `is not
        # None` — presumably intentional for this payload; confirm upstream.
        full_map = {
            "column": self.column,
            "options": self.options.as_dict(),
        }
        return {key: value for key, value in full_map.items() if value}

    @classmethod
    def _from_dict(
        cls,
        raw_dict: dict[str, Any],
    ) -> TableVectorIndexDefinition:
        """
        Create an instance of TableVectorIndexDefinition from a dictionary
        such as one from the Data API.
        """
        _warn_residual_keys(cls, raw_dict, {"column", "options"})
        return TableVectorIndexDefinition(
            column=raw_dict["column"],
            options=TableVectorIndexOptions.coerce(raw_dict.get("options") or {}),
        )

    @classmethod
    def coerce(
        cls,
        raw_input: TableVectorIndexDefinition | dict[str, Any],
    ) -> TableVectorIndexDefinition:
        """
        Normalize the input, whether an object already or a plain dictionary
        of the right structure, into a TableVectorIndexDefinition.
        """
        if isinstance(raw_input, TableVectorIndexDefinition):
            return raw_input
        filled_input = {"options": {}, **raw_input}
        return cls._from_dict(filled_input)
Attributes
column- the name of the indexed column.
options- a
TableVectorIndexOptionsdetailing the index configuration.
Ancestors
- TableBaseIndexDefinition
- abc.ABC
Static methods
def coerce(raw_input: TableVectorIndexDefinition | dict[str, Any]) ‑> TableVectorIndexDefinition-
Normalize the input, whether an object already or a plain dictionary of the right structure, into a TableVectorIndexDefinition.
Instance variables
var options : TableVectorIndexOptions-
The type of the None singleton.
Methods
def as_dict(self) ‑> dict[str, typing.Any]-
Expand source code
def as_dict(self) -> dict[str, Any]: """Recast this object into a dictionary.""" return { k: v for k, v in { "column": self.column, "options": self.options.as_dict(), }.items() if v }Recast this object into a dictionary.
Inherited members
class TableVectorIndexOptions (metric: str | UnsetType = (unset), source_model: str | UnsetType = (unset))-
Expand source code
@dataclass
class TableVectorIndexOptions:
    """
    An object describing the options for a table vector index, which is the
    index that enables vector (ANN) search on a column.

    Both when creating indexes and retrieving index metadata from the API,
    instances of TableVectorIndexOptions are used to express the corresponding
    index settings.

    Attributes:
        metric: the similarity metric used in the index. It must be one of
            the strings defined in `astrapy.constants.VectorMetric`
            (such as "dot_product").
        source_model: an optional parameter to help the index pick the set
            of parameters best suited to a specific embedding model. If
            omitted, the Data API will use its defaults. See the Data API
            documentation for more details.
    """

    metric: str | UnsetType = _UNSET
    source_model: str | UnsetType = _UNSET

    def __repr__(self) -> str:
        pieces: list[str] = []
        if not isinstance(self.metric, UnsetType):
            pieces.append(f"metric={self.metric}")
        if not isinstance(self.source_model, UnsetType):
            pieces.append(f"source_model={self.source_model}")
        return f"{self.__class__.__name__}({', '.join(pieces)})"

    def as_dict(self) -> dict[str, Any]:
        """Recast this object into a dictionary."""
        full_map = {
            "metric": None if isinstance(self.metric, UnsetType) else self.metric,
            "sourceModel": None
            if isinstance(self.source_model, UnsetType)
            else self.source_model,
        }
        return {key: value for key, value in full_map.items() if value is not None}

    @classmethod
    def _from_dict(cls, raw_dict: dict[str, Any]) -> TableVectorIndexOptions:
        """
        Create an instance of TableVectorIndexOptions from a dictionary
        such as one from the Data API.
        """
        _warn_residual_keys(cls, raw_dict, {"metric", "sourceModel"})
        metric_value = raw_dict.get("metric")
        source_model_value = raw_dict.get("sourceModel")
        return TableVectorIndexOptions(
            metric=_UNSET if metric_value is None else metric_value,
            source_model=_UNSET if source_model_value is None else source_model_value,
        )

    @classmethod
    def coerce(
        cls, raw_input: TableVectorIndexOptions | dict[str, Any] | None
    ) -> TableVectorIndexOptions:
        """
        Normalize the input, whether an object already or a plain dictionary
        of the right structure, into a TableVectorIndexOptions.
        """
        if isinstance(raw_input, TableVectorIndexOptions):
            return raw_input
        if raw_input is None:
            return cls(metric=_UNSET, source_model=_UNSET)
        return cls._from_dict(raw_input)
Both when creating indexes and retrieving index metadata from the API, instances of TableVectorIndexOptions are used to express the corresponding index settings.
Attributes
metric- the similarity metric used in the index. It must be one of the strings
defined in
VectorMetric(such as "dot_product"). source_model- an optional parameter to help the index pick the set of parameters best suited to a specific embedding model. If omitted, the Data API will use its defaults. See the Data API documentation for more details.
Static methods
def coerce(raw_input: TableVectorIndexOptions | dict[str, Any] | None) ‑> TableVectorIndexOptions-
Normalize the input, whether an object already or a plain dictionary of the right structure, into a TableVectorIndexOptions.
Instance variables
var metric : str | UnsetType-
The type of the None singleton.
var source_model : str | UnsetType-
The type of the None singleton.
Methods
def as_dict(self) ‑> dict[str, typing.Any]-
Expand source code
def as_dict(self) -> dict[str, Any]: """Recast this object into a dictionary.""" return { k: v for k, v in { "metric": None if isinstance(self.metric, UnsetType) else self.metric, "sourceModel": None if isinstance(self.source_model, UnsetType) else self.source_model, }.items() if v is not None }Recast this object into a dictionary.
class VectorServiceOptions (provider: str | None,
model_name: str | None,
authentication: dict[str, Any] | None = None,
parameters: dict[str, Any] | None = None)-
Expand source code
@dataclass
class VectorServiceOptions:
    """
    The "vector.service" component of the collection options.
    See the Data API specifications for allowed values.

    Attributes:
        provider: the name of a service provider for embedding calculation.
        model_name: the name of a specific model for use by the service.
        authentication: a key-value dictionary for the "authentication"
            specification, if any, in the vector service options.
        parameters: a key-value dictionary for the "parameters"
            specification, if any, in the vector service options.
    """

    provider: str | None
    model_name: str | None
    authentication: dict[str, Any] | None = None
    parameters: dict[str, Any] | None = None

    def as_dict(self) -> dict[str, Any]:
        """Recast this object into a dictionary."""
        field_map = {
            "provider": self.provider,
            "modelName": self.model_name,
            "authentication": self.authentication,
            "parameters": self.parameters,
        }
        return {key: value for key, value in field_map.items() if value is not None}

    @staticmethod
    def _from_dict(
        raw_dict: dict[str, Any] | None,
    ) -> VectorServiceOptions | None:
        """
        Create an instance of VectorServiceOptions from a dictionary
        such as one from the Data API.
        """
        if raw_dict is None:
            return None
        return VectorServiceOptions(
            provider=raw_dict.get("provider"),
            model_name=raw_dict.get("modelName"),
            authentication=raw_dict.get("authentication"),
            parameters=raw_dict.get("parameters"),
        )

    @staticmethod
    def coerce(
        raw_input: VectorServiceOptions | dict[str, Any] | None,
    ) -> VectorServiceOptions | None:
        # Pass objects through unchanged; dictionaries (and None) go
        # through the dictionary-parsing path.
        if isinstance(raw_input, VectorServiceOptions):
            return raw_input
        return VectorServiceOptions._from_dict(raw_input)
Attributes
provider- the name of a service provider for embedding calculation.
model_name- the name of a specific model for use by the service.
authentication- a key-value dictionary for the "authentication" specification, if any, in the vector service options.
parameters- a key-value dictionary for the "parameters" specification, if any, in the vector service options.
Static methods
def coerce(raw_input: VectorServiceOptions | dict[str, Any] | None) ‑> VectorServiceOptions | None-
Expand source code
@staticmethod def coerce( raw_input: VectorServiceOptions | dict[str, Any] | None, ) -> VectorServiceOptions | None: if isinstance(raw_input, VectorServiceOptions): return raw_input else: return VectorServiceOptions._from_dict(raw_input)
Instance variables
var authentication : dict[str, typing.Any] | None-
The type of the None singleton.
var model_name : str | None-
The type of the None singleton.
var parameters : dict[str, typing.Any] | None-
The type of the None singleton.
var provider : str | None-
The type of the None singleton.
Methods
def as_dict(self) ‑> dict[str, typing.Any]-
Expand source code
def as_dict(self) -> dict[str, Any]: """Recast this object into a dictionary.""" return { k: v for k, v in { "provider": self.provider, "modelName": self.model_name, "authentication": self.authentication, "parameters": self.parameters, }.items() if v is not None }Recast this object into a dictionary.