Embeddings
Embedding models convert text into numerical vectors. These vectors can be used for tasks such as similarity search, clustering, and classification.
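To make the similarity-search use case concrete, here is a minimal, provider-agnostic sketch: given two embedding vectors produced by any of the components below, cosine similarity scores how close the texts are in meaning. The vectors shown are illustrative placeholders.

```python
import math

def cosine_similarity(a: list[float], b: list[float]) -> float:
    """Cosine similarity: 1.0 means same direction, 0.0 means unrelated."""
    dot = sum(x * y for x, y in zip(a, b))
    norm_a = math.sqrt(sum(x * x for x in a))
    norm_b = math.sqrt(sum(x * x for x in b))
    return dot / (norm_a * norm_b)

# Placeholder vectors; in practice these come from embed_query()/embed_documents().
query_vec = [0.1, 0.3, 0.5]
doc_vec = [0.2, 0.25, 0.55]
print(cosine_similarity(query_vec, doc_vec))
```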
AI/ML
This component generates embeddings using the AI/ML API.
Parameters
Inputs

| Name | Type | Description |
|---|---|---|
| model_name | String | The name of the AI/ML embedding model to use |
| aiml_api_key | SecretString | API key for authenticating with the AI/ML service |

Outputs

| Name | Type | Description |
|---|---|---|
| embeddings | Embeddings | An instance of AIMLEmbeddingsImpl for generating embeddings |
Component code
AIML.py
```python
from langflow.base.embeddings.model import LCEmbeddingsModel
from langflow.base.models.aiml_constants import AIML_EMBEDDING_MODELS
from langflow.components.embeddings.util.AIMLEmbeddingsImpl import AIMLEmbeddingsImpl
from langflow.field_typing import Embeddings
from langflow.inputs.inputs import DropdownInput
from langflow.io import SecretStrInput


class AIMLEmbeddingsComponent(LCEmbeddingsModel):
    display_name = "AI/ML Embeddings"
    description = "Generate embeddings using the AI/ML API."
    icon = "AI/ML"
    name = "AIMLEmbeddings"

    inputs = [
        DropdownInput(
            name="model_name",
            display_name="Model Name",
            options=AIML_EMBEDDING_MODELS,
            required=True,
        ),
        SecretStrInput(
            name="aiml_api_key",
            display_name="AI/ML API Key",
            value="AIML_API_KEY",
            required=True,
        ),
    ]

    def build_embeddings(self) -> Embeddings:
        return AIMLEmbeddingsImpl(
            api_key=self.aiml_api_key,
            model=self.model_name,
        )
```
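Whatever the provider, `build_embeddings()` returns a LangChain `Embeddings` object, so downstream use looks the same for every component on this page. A hedged sketch, where `component` stands for any configured embeddings component instance:

```python
# `component` is assumed to be a configured embeddings component instance.
embeddings = component.build_embeddings()

query_vector = embeddings.embed_query("What is an embedding?")          # one vector
doc_vectors = embeddings.embed_documents(["first doc", "second doc"])   # one vector per doc
```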
Amazon Bedrock Embeddings
Use this component to load embedding models and generate embeddings with Amazon Bedrock.
This component requires an AWS account and access to Amazon Bedrock.
Parameters
| Name | Display Name | Info |
|---|---|---|
| credentials_profile_name | AWS Credentials Profile | Name of the AWS credentials profile in ~/.aws/credentials or ~/.aws/config |
| model_id | Model ID | ID of the model to call, e.g., amazon.titan-embed-text-v1 |
| endpoint_url | Endpoint URL | URL to set a specific service endpoint other than the default AWS endpoint |
| region_name | AWS Region | AWS region to use, e.g., us-west-2 |
Component code
AmazonBedrock.py
```python
from langchain_community.embeddings import BedrockEmbeddings

from langflow.base.models.model import LCModelComponent
from langflow.field_typing import Embeddings
from langflow.inputs import SecretStrInput
from langflow.io import DropdownInput, MessageTextInput, Output


class AmazonBedrockEmbeddingsComponent(LCModelComponent):
    display_name: str = "Amazon Bedrock Embeddings"
    description: str = "Generate embeddings using Amazon Bedrock models."
    documentation = "https://python.langchain.com/docs/modules/data_connection/text_embedding/integrations/bedrock"
    icon = "Amazon"
    name = "AmazonBedrockEmbeddings"

    inputs = [
        DropdownInput(
            name="model_id",
            display_name="Model Id",
            options=["amazon.titan-embed-text-v1"],
            value="amazon.titan-embed-text-v1",
        ),
        SecretStrInput(name="aws_access_key", display_name="Access Key"),
        SecretStrInput(name="aws_secret_key", display_name="Secret Key"),
        MessageTextInput(
            name="credentials_profile_name",
            display_name="Credentials Profile Name",
            advanced=True,
        ),
        MessageTextInput(name="region_name", display_name="Region Name", value="us-east-1"),
        MessageTextInput(name="endpoint_url", display_name="Endpoint URL", advanced=True),
    ]

    outputs = [
        Output(display_name="Embeddings", name="embeddings", method="build_embeddings"),
    ]

    def build_embeddings(self) -> Embeddings:
        import boto3  # type: ignore

        # Prefer explicit keys, then a named profile, then boto3's default
        # credential chain (env vars, shared config, IAM role).
        if self.aws_access_key:
            session = boto3.Session(
                aws_access_key_id=self.aws_access_key,
                aws_secret_access_key=self.aws_secret_key,
            )
        elif self.credentials_profile_name:
            session = boto3.Session(profile_name=self.credentials_profile_name)
        else:
            session = boto3.Session()

        client_params = {}
        if self.endpoint_url:
            client_params["endpoint_url"] = self.endpoint_url
        if self.region_name:
            client_params["region_name"] = self.region_name

        boto3_client = session.client("bedrock-runtime", **client_params)
        output = BedrockEmbeddings(
            credentials_profile_name=self.credentials_profile_name,
            client=boto3_client,
            model_id=self.model_id,
            endpoint_url=self.endpoint_url,
            region_name=self.region_name,
        )  # type: ignore
        return output
```
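Outside Langflow, the same wiring can be reproduced directly with boto3 and `BedrockEmbeddings`. A minimal sketch, assuming default-chain AWS credentials and the example model ID from the table above:

```python
import boto3
from langchain_community.embeddings import BedrockEmbeddings

# Credentials resolve through boto3's default chain (env vars, profile, IAM role).
session = boto3.Session()
client = session.client("bedrock-runtime", region_name="us-east-1")

embedder = BedrockEmbeddings(client=client, model_id="amazon.titan-embed-text-v1")
vector = embedder.embed_query("hello bedrock")
```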
Astra DB vectorize
Use this component to generate embeddings with Astra DB vectorize.
This component requires that your Astra DB database has a collection that uses a vectorize embedding provider integration. For more information and instructions, see Auto-generate embeddings with vectorize.
Parameters
| Name | Display Name | Info |
|---|---|---|
| provider | Embedding Provider | The embedding provider to use |
| model_name | Model Name | The embedding model to use |
| authentication | Authentication | The name of the API key in Astra KMS that stores your vectorize embedding provider credentials. (Not required if using an Astra-hosted embedding provider.) |
| provider_api_key | Provider API Key | As an alternative to authentication, an API key for the provider that is passed directly with each request to Astra DB |
| model_parameters | Model Parameters | Additional model parameters |
Component code
AstraVectorize.py
```python
from typing import Any

from langflow.custom import Component
from langflow.inputs.inputs import DictInput, DropdownInput, MessageTextInput, SecretStrInput
from langflow.template.field.base import Output


class AstraVectorizeComponent(Component):
    display_name: str = "Astra Vectorize"
    description: str = "Configuration options for Astra Vectorize server-side embeddings."
    documentation: str = "https://docs.datastax.com/en/astra-db-serverless/databases/embedding-generation.html"
    icon = "AstraDB"
    name = "AstraVectorize"

    # Maps the display name of each provider to its API identifier and supported models.
    VECTORIZE_PROVIDERS_MAPPING = {
        "Azure OpenAI": ["azureOpenAI", ["text-embedding-3-small", "text-embedding-3-large", "text-embedding-ada-002"]],
        "Hugging Face - Dedicated": ["huggingfaceDedicated", ["endpoint-defined-model"]],
        "Hugging Face - Serverless": [
            "huggingface",
            [
                "sentence-transformers/all-MiniLM-L6-v2",
                "intfloat/multilingual-e5-large",
                "intfloat/multilingual-e5-large-instruct",
                "BAAI/bge-small-en-v1.5",
                "BAAI/bge-base-en-v1.5",
                "BAAI/bge-large-en-v1.5",
            ],
        ],
        "Jina AI": [
            "jinaAI",
            [
                "jina-embeddings-v2-base-en",
                "jina-embeddings-v2-base-de",
                "jina-embeddings-v2-base-es",
                "jina-embeddings-v2-base-code",
                "jina-embeddings-v2-base-zh",
            ],
        ],
        "Mistral AI": ["mistral", ["mistral-embed"]],
        "NVIDIA": ["nvidia", ["NV-Embed-QA"]],
        "OpenAI": ["openai", ["text-embedding-3-small", "text-embedding-3-large", "text-embedding-ada-002"]],
        "Upstage": ["upstageAI", ["solar-embedding-1-large"]],
        "Voyage AI": [
            "voyageAI",
            ["voyage-large-2-instruct", "voyage-law-2", "voyage-code-2", "voyage-large-2", "voyage-2"],
        ],
    }
    VECTORIZE_MODELS_STR = "\n\n".join(
        [provider + ": " + (", ".join(models[1])) for provider, models in VECTORIZE_PROVIDERS_MAPPING.items()]
    )

    inputs = [
        DropdownInput(
            name="provider",
            display_name="Provider",
            options=list(VECTORIZE_PROVIDERS_MAPPING.keys()),
            value="",
            required=True,
        ),
        MessageTextInput(
            name="model_name",
            display_name="Model Name",
            info=f"The embedding model to use for the selected provider. Each provider has a different set of models "
            f"available (full list at https://docs.datastax.com/en/astra-db-serverless/databases/embedding-generation.html):\n\n{VECTORIZE_MODELS_STR}",
            required=True,
        ),
        MessageTextInput(
            name="api_key_name",
            display_name="API Key name",
            info="The name of the embeddings provider API key stored on Astra. If set, it will override the 'ProviderKey' in the authentication parameters.",
        ),
        DictInput(
            name="authentication",
            display_name="Authentication Parameters",
            is_list=True,
            advanced=True,
        ),
        SecretStrInput(
            name="provider_api_key",
            display_name="Provider API Key",
            info="An alternative to the Astra Authentication that passes an API key for the provider with each request to Astra DB. This may be used when Vectorize is configured for the collection, but no corresponding provider secret is stored within Astra's key management system.",
            advanced=True,
        ),
        DictInput(
            name="model_parameters",
            display_name="Model Parameters",
            advanced=True,
            is_list=True,
        ),
    ]

    outputs = [
        Output(display_name="Vectorize", name="config", method="build_options", types=["dict"]),
    ]

    def build_options(self) -> dict[str, Any]:
        provider_value = self.VECTORIZE_PROVIDERS_MAPPING[self.provider][0]
        authentication = {**(self.authentication or {})}
        api_key_name = self.api_key_name
        if api_key_name:
            authentication["providerKey"] = api_key_name
        return {
            # must match astrapy.info.CollectionVectorServiceOptions
            "collection_vector_service_options": {
                "provider": provider_value,
                "modelName": self.model_name,
                "authentication": authentication,
                "parameters": self.model_parameters or {},
            },
            "collection_embedding_api_key": self.provider_api_key,
        }
```
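For reference, this is roughly what `build_options()` returns for the OpenAI provider when `api_key_name="openai_key"` is set and no extra model parameters are given (the values here are illustrative):

```python
# Illustrative output of build_options(); the inner key names must match
# astrapy.info.CollectionVectorServiceOptions, as noted in the code above.
options = {
    "collection_vector_service_options": {
        "provider": "openai",
        "modelName": "text-embedding-3-small",
        "authentication": {"providerKey": "openai_key"},
        "parameters": {},
    },
    "collection_embedding_api_key": None,  # set only when provider_api_key is used
}
```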
Azure OpenAI Embeddings
This component generates embeddings using Azure OpenAI models.
Use this component to create embeddings with Azure’s OpenAI service.
Make sure you have the necessary Azure credentials and have set up the OpenAI resource.
Parameters
| Name | Display Name | Info |
|---|---|---|
| Azure Endpoint | Azure Endpoint | Your Azure endpoint, including the resource |
| Deployment Name | Deployment Name | The name of the deployment |
| API Version | API Version | The API version to use |
| API Key | API Key | The API key to access the Azure OpenAI service |
Component code
AzureOpenAI.py
```python
from langchain_openai import AzureOpenAIEmbeddings

from langflow.base.models.model import LCModelComponent
from langflow.field_typing import Embeddings
from langflow.io import DropdownInput, IntInput, MessageTextInput, Output, SecretStrInput


class AzureOpenAIEmbeddingsComponent(LCModelComponent):
    display_name: str = "Azure OpenAI Embeddings"
    description: str = "Generate embeddings using Azure OpenAI models."
    documentation: str = "https://python.langchain.com/docs/integrations/text_embedding/azureopenai"
    icon = "Azure"
    name = "AzureOpenAIEmbeddings"

    API_VERSION_OPTIONS = [
        "2022-12-01",
        "2023-03-15-preview",
        "2023-05-15",
        "2023-06-01-preview",
        "2023-07-01-preview",
        "2023-08-01-preview",
    ]

    inputs = [
        MessageTextInput(
            name="azure_endpoint",
            display_name="Azure Endpoint",
            required=True,
            info="Your Azure endpoint, including the resource. Example: `https://example-resource.azure.openai.com/`",
        ),
        MessageTextInput(
            name="azure_deployment",
            display_name="Deployment Name",
            required=True,
        ),
        DropdownInput(
            name="api_version",
            display_name="API Version",
            options=API_VERSION_OPTIONS,
            value=API_VERSION_OPTIONS[-1],
            advanced=True,
        ),
        SecretStrInput(
            name="api_key",
            display_name="API Key",
            required=True,
        ),
        IntInput(
            name="dimensions",
            display_name="Dimensions",
            info="The number of dimensions the resulting output embeddings should have. Only supported by certain models.",
            advanced=True,
        ),
    ]

    outputs = [
        Output(display_name="Embeddings", name="embeddings", method="build_embeddings"),
    ]

    def build_embeddings(self) -> Embeddings:
        try:
            embeddings = AzureOpenAIEmbeddings(
                azure_endpoint=self.azure_endpoint,
                azure_deployment=self.azure_deployment,
                api_version=self.api_version,
                api_key=self.api_key,
                dimensions=self.dimensions or None,
            )
        except Exception as e:
            raise ValueError(f"Could not connect to AzureOpenAIEmbeddings API: {e}") from e
        return embeddings
```
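A minimal standalone sketch of the underlying call; the endpoint, deployment name, and key below are placeholders you must replace with your own values:

```python
from langchain_openai import AzureOpenAIEmbeddings

embedder = AzureOpenAIEmbeddings(
    azure_endpoint="https://example-resource.azure.openai.com/",  # your resource endpoint
    azure_deployment="my-embedding-deployment",                   # your deployment name
    api_version="2023-08-01-preview",
    api_key="<your-azure-openai-key>",                            # placeholder
)
vector = embedder.embed_query("hello azure")
```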
Cohere Embeddings
This component loads embedding models from Cohere.
Use this component to generate embeddings using Cohere’s AI models.
Ensure you have a valid Cohere API key.
Parameters
| Name | Display Name | Info |
|---|---|---|
| cohere_api_key | Cohere API Key | API key required to authenticate with the Cohere service |
| model | Model Name | Language model used for embedding text documents and performing queries |
| truncate | Truncate | Whether to truncate the input text to fit within the model’s constraints |
Component code
Cohere.py
```python
from langchain_community.embeddings.cohere import CohereEmbeddings

from langflow.base.models.model import LCModelComponent
from langflow.field_typing import Embeddings
from langflow.io import DropdownInput, FloatInput, IntInput, MessageTextInput, Output, SecretStrInput


class CohereEmbeddingsComponent(LCModelComponent):
    display_name = "Cohere Embeddings"
    description = "Generate embeddings using Cohere models."
    icon = "Cohere"
    name = "CohereEmbeddings"

    inputs = [
        SecretStrInput(name="cohere_api_key", display_name="Cohere API Key"),
        DropdownInput(
            name="model",
            display_name="Model",
            advanced=True,
            options=[
                "embed-english-v2.0",
                "embed-multilingual-v2.0",
                "embed-english-light-v2.0",
                "embed-multilingual-light-v2.0",
            ],
            value="embed-english-v2.0",
        ),
        MessageTextInput(name="truncate", display_name="Truncate", advanced=True),
        IntInput(name="max_retries", display_name="Max Retries", value=3, advanced=True),
        MessageTextInput(name="user_agent", display_name="User Agent", advanced=True, value="langchain"),
        FloatInput(name="request_timeout", display_name="Request Timeout", advanced=True),
    ]

    outputs = [
        Output(display_name="Embeddings", name="embeddings", method="build_embeddings"),
    ]

    def build_embeddings(self) -> Embeddings:
        return CohereEmbeddings(  # type: ignore
            cohere_api_key=self.cohere_api_key,
            model=self.model,
            truncate=self.truncate,
            max_retries=self.max_retries,
            user_agent=self.user_agent,
            request_timeout=self.request_timeout or None,
        )
```
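A minimal sketch of the wrapped `CohereEmbeddings` call; the key is a placeholder, and `truncate="END"` asks the service to cut over-long inputs at the end:

```python
from langchain_community.embeddings.cohere import CohereEmbeddings

embedder = CohereEmbeddings(
    cohere_api_key="<your-cohere-api-key>",  # placeholder
    model="embed-english-v2.0",
    truncate="END",  # truncate over-long inputs at the end
)
vectors = embedder.embed_documents(["first document", "second document"])
```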
Hugging Face Embeddings
This component loads embedding models from HuggingFace.
Use this component to generate embeddings using locally downloaded Hugging Face models. Ensure you have sufficient computational resources to run the models.
Parameters
| Name | Display Name | Info |
|---|---|---|
| Cache Folder | Cache Folder | Folder path to cache HuggingFace models |
| Encode Kwargs | Encoding Arguments | Additional arguments for the encoding process |
| Model Kwargs | Model Arguments | Additional arguments for the model |
| Model Name | Model Name | Name of the HuggingFace model to use |
| Multi Process | Multi-Process | Whether to use multiple processes |
Component code
HuggingFaceInferenceAPI.py
```python
from langchain_community.embeddings.huggingface import HuggingFaceInferenceAPIEmbeddings
from pydantic.v1.types import SecretStr

from langflow.base.models.model import LCModelComponent
from langflow.field_typing import Embeddings
from langflow.io import MessageTextInput, Output, SecretStrInput


class HuggingFaceInferenceAPIEmbeddingsComponent(LCModelComponent):
    display_name = "HuggingFace Embeddings"
    description = "Generate embeddings using Hugging Face Inference API models."
    documentation = "https://github.com/huggingface/text-embeddings-inference"
    icon = "HuggingFace"
    name = "HuggingFaceInferenceAPIEmbeddings"

    inputs = [
        SecretStrInput(name="api_key", display_name="API Key"),
        MessageTextInput(name="api_url", display_name="API URL", advanced=True, value="http://localhost:8080"),
        MessageTextInput(name="model_name", display_name="Model Name", value="BAAI/bge-large-en-v1.5"),
    ]

    outputs = [
        Output(display_name="Embeddings", name="embeddings", method="build_embeddings"),
    ]

    def build_embeddings(self) -> Embeddings:
        if not self.api_key:
            raise ValueError("API Key is required")
        api_key = SecretStr(self.api_key)
        return HuggingFaceInferenceAPIEmbeddings(api_key=api_key, api_url=self.api_url, model_name=self.model_name)
```
Hugging Face API Embeddings
This component generates embeddings using Hugging Face Inference API models.
Use this component to create embeddings with Hugging Face’s hosted models. Ensure you have a valid Hugging Face API key.
Parameters
| Name | Display Name | Info |
|---|---|---|
| API Key | API Key | API key for accessing the Hugging Face Inference API |
| API URL | API URL | URL of the Hugging Face Inference API |
| Model Name | Model Name | Name of the model to use for embeddings |
| Cache Folder | Cache Folder | Folder path to cache Hugging Face models |
| Encode Kwargs | Encoding Arguments | Additional arguments for the encoding process |
| Model Kwargs | Model Arguments | Additional arguments for the model |
| Multi Process | Multi-Process | Whether to use multiple processes |
Component code
HuggingFaceInferenceAPI.py
```python
from langchain_community.embeddings.huggingface import HuggingFaceInferenceAPIEmbeddings
from pydantic.v1.types import SecretStr

from langflow.base.models.model import LCModelComponent
from langflow.field_typing import Embeddings
from langflow.io import MessageTextInput, Output, SecretStrInput


class HuggingFaceInferenceAPIEmbeddingsComponent(LCModelComponent):
    display_name = "HuggingFace Embeddings"
    description = "Generate embeddings using Hugging Face Inference API models."
    documentation = "https://github.com/huggingface/text-embeddings-inference"
    icon = "HuggingFace"
    name = "HuggingFaceInferenceAPIEmbeddings"

    inputs = [
        SecretStrInput(name="api_key", display_name="API Key"),
        MessageTextInput(name="api_url", display_name="API URL", advanced=True, value="http://localhost:8080"),
        MessageTextInput(name="model_name", display_name="Model Name", value="BAAI/bge-large-en-v1.5"),
    ]

    outputs = [
        Output(display_name="Embeddings", name="embeddings", method="build_embeddings"),
    ]

    def build_embeddings(self) -> Embeddings:
        if not self.api_key:
            raise ValueError("API Key is required")
        api_key = SecretStr(self.api_key)
        return HuggingFaceInferenceAPIEmbeddings(api_key=api_key, api_url=self.api_url, model_name=self.model_name)
```
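Because the API URL is configurable, the same class can point at a self-hosted text-embeddings-inference server instead of the hosted Inference API. A hedged sketch mirroring the component defaults above:

```python
from langchain_community.embeddings.huggingface import HuggingFaceInferenceAPIEmbeddings
from pydantic.v1.types import SecretStr

# Assumes a text-embeddings-inference server is listening on localhost:8080.
embedder = HuggingFaceInferenceAPIEmbeddings(
    api_key=SecretStr("<your-hf-token>"),  # placeholder
    api_url="http://localhost:8080",
    model_name="BAAI/bge-large-en-v1.5",
)
vector = embedder.embed_query("hello hugging face")
```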
MistralAI Embeddings
This component generates embeddings using MistralAI models.
Parameters
Inputs

| Name | Type | Description |
|---|---|---|
| model | String | The MistralAI model to use (default: "mistral-embed") |
| mistral_api_key | SecretString | API key for authenticating with MistralAI |
| max_concurrent_requests | Integer | Maximum number of concurrent API requests (default: 64) |
| max_retries | Integer | Maximum number of retry attempts for failed requests (default: 5) |
| timeout | Integer | Request timeout in seconds (default: 120) |
| endpoint | String | Custom API endpoint URL (default: "https://api.mistral.ai/v1/") |

Outputs

| Name | Type | Description |
|---|---|---|
| embeddings | Embeddings | MistralAIEmbeddings instance for generating embeddings |
Component code
MistalAI.py
```python
from langchain_mistralai.embeddings import MistralAIEmbeddings
from pydantic.v1 import SecretStr

from langflow.base.models.model import LCModelComponent
from langflow.field_typing import Embeddings
from langflow.io import DropdownInput, IntInput, MessageTextInput, Output, SecretStrInput


class MistralAIEmbeddingsComponent(LCModelComponent):
    display_name = "MistralAI Embeddings"
    description = "Generate embeddings using MistralAI models."
    icon = "MistralAI"
    name = "MistalAIEmbeddings"

    inputs = [
        DropdownInput(
            name="model",
            display_name="Model",
            advanced=False,
            options=["mistral-embed"],
            value="mistral-embed",
        ),
        SecretStrInput(name="mistral_api_key", display_name="Mistral API Key"),
        IntInput(
            name="max_concurrent_requests",
            display_name="Max Concurrent Requests",
            advanced=True,
            value=64,
        ),
        IntInput(name="max_retries", display_name="Max Retries", advanced=True, value=5),
        IntInput(name="timeout", display_name="Request Timeout", advanced=True, value=120),
        MessageTextInput(
            name="endpoint",
            display_name="API Endpoint",
            advanced=True,
            value="https://api.mistral.ai/v1/",
        ),
    ]

    outputs = [
        Output(display_name="Embeddings", name="embeddings", method="build_embeddings"),
    ]

    def build_embeddings(self) -> Embeddings:
        if not self.mistral_api_key:
            raise ValueError("Mistral API Key is required")
        api_key = SecretStr(self.mistral_api_key)
        return MistralAIEmbeddings(
            api_key=api_key,
            model=self.model,
            endpoint=self.endpoint,
            max_concurrent_requests=self.max_concurrent_requests,
            max_retries=self.max_retries,
            timeout=self.timeout,
        )
```
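A minimal sketch of the direct `MistralAIEmbeddings` call the component wraps; note that, as in the component, the key is wrapped in pydantic's `SecretStr`:

```python
from langchain_mistralai.embeddings import MistralAIEmbeddings
from pydantic.v1 import SecretStr

embedder = MistralAIEmbeddings(
    api_key=SecretStr("<your-mistral-api-key>"),  # placeholder
    model="mistral-embed",
)
vector = embedder.embed_query("hello mistral")
```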
NVIDIA
This component generates embeddings using NVIDIA models.
Parameters
Inputs

| Name | Type | Description |
|---|---|---|
| model | String | The NVIDIA model to use for embeddings (e.g., nvidia/nv-embed-v1) |
| base_url | String | Base URL for the NVIDIA API (default: https://integrate.api.nvidia.com/v1) |
| nvidia_api_key | SecretString | API key for authenticating with NVIDIA’s service |
| temperature | Float | Model temperature for embedding generation (default: 0.1) |

Outputs

| Name | Type | Description |
|---|---|---|
| embeddings | Embeddings | NVIDIAEmbeddings instance for generating embeddings |
Component code
NVIDIA.py
```python
from typing import Any

from langflow.base.embeddings.model import LCEmbeddingsModel
from langflow.field_typing import Embeddings
from langflow.inputs.inputs import DropdownInput, SecretStrInput
from langflow.io import FloatInput, MessageTextInput
from langflow.schema.dotdict import dotdict


class NVIDIAEmbeddingsComponent(LCEmbeddingsModel):
    display_name: str = "NVIDIA Embeddings"
    description: str = "Generate embeddings using NVIDIA models."
    icon = "NVIDIA"

    inputs = [
        DropdownInput(
            name="model",
            display_name="Model",
            options=[
                "nvidia/nv-embed-v1",
                "snowflake/arctic-embed-l",
            ],
            value="nvidia/nv-embed-v1",
        ),
        MessageTextInput(
            name="base_url",
            display_name="NVIDIA Base URL",
            refresh_button=True,
            value="https://integrate.api.nvidia.com/v1",
        ),
        SecretStrInput(
            name="nvidia_api_key",
            display_name="NVIDIA API Key",
            info="The NVIDIA API Key.",
            advanced=False,
            value="NVIDIA_API_KEY",
        ),
        FloatInput(
            name="temperature",
            display_name="Model Temperature",
            value=0.1,
            advanced=True,
        ),
    ]

    def update_build_config(self, build_config: dotdict, field_value: Any, field_name: str | None = None):
        # When the base URL changes, refresh the model dropdown from the endpoint.
        if field_name == "base_url" and field_value:
            try:
                build_model = self.build_embeddings()
                ids = [model.id for model in build_model.available_models]  # type: ignore
                build_config["model"]["options"] = ids
                build_config["model"]["value"] = ids[0]
            except Exception as e:
                raise ValueError(f"Error getting model names: {e}") from e
        return build_config

    def build_embeddings(self) -> Embeddings:
        try:
            from langchain_nvidia_ai_endpoints import NVIDIAEmbeddings
        except ImportError as e:
            raise ImportError("Please install langchain-nvidia-ai-endpoints to use the Nvidia model.") from e
        try:
            output = NVIDIAEmbeddings(
                model=self.model,
                base_url=self.base_url,
                temperature=self.temperature,
                nvidia_api_key=self.nvidia_api_key,
            )  # type: ignore
        except Exception as e:
            raise ValueError(f"Could not connect to NVIDIA API. Error: {e}") from e
        return output
```
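A hedged standalone sketch of the underlying call; it requires the optional langchain-nvidia-ai-endpoints package, and the key is a placeholder:

```python
from langchain_nvidia_ai_endpoints import NVIDIAEmbeddings

embedder = NVIDIAEmbeddings(
    model="nvidia/nv-embed-v1",
    base_url="https://integrate.api.nvidia.com/v1",
    nvidia_api_key="<your-nvidia-api-key>",  # placeholder
)
vector = embedder.embed_query("hello nvidia")
```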
Ollama Embeddings
This component generates embeddings using Ollama models.
Use this component to create embeddings with locally run Ollama models.
Ensure you have Ollama set up and running on your system.
Parameters
| Name | Display Name | Info |
|---|---|---|
| Ollama Model | Model Name | Name of the Ollama model to use |
| Ollama Base URL | Base URL | Base URL of the Ollama API |
| Model Temperature | Temperature | Temperature parameter for the model |
Component code
Ollama.py
```python
from langchain_community.embeddings import OllamaEmbeddings

from langflow.base.models.model import LCModelComponent
from langflow.field_typing import Embeddings
from langflow.io import FloatInput, MessageTextInput, Output


class OllamaEmbeddingsComponent(LCModelComponent):
    display_name: str = "Ollama Embeddings"
    description: str = "Generate embeddings using Ollama models."
    documentation = "https://python.langchain.com/docs/integrations/text_embedding/ollama"
    icon = "Ollama"
    name = "OllamaEmbeddings"

    inputs = [
        MessageTextInput(
            name="model",
            display_name="Ollama Model",
            value="llama3.1",
        ),
        MessageTextInput(
            name="base_url",
            display_name="Ollama Base URL",
            value="http://localhost:11434",
        ),
        FloatInput(
            name="temperature",
            display_name="Model Temperature",
            value=0.1,
            advanced=True,
        ),
    ]

    outputs = [
        Output(display_name="Embeddings", name="embeddings", method="build_embeddings"),
    ]

    def build_embeddings(self) -> Embeddings:
        try:
            output = OllamaEmbeddings(
                model=self.model,
                base_url=self.base_url,
                temperature=self.temperature,
            )  # type: ignore
        except Exception as e:
            raise ValueError("Could not connect to Ollama API.") from e
        return output
```
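A minimal local sketch mirroring the component defaults; it assumes an Ollama server is already running on localhost and the model has been pulled (for example with `ollama pull llama3.1`):

```python
from langchain_community.embeddings import OllamaEmbeddings

embedder = OllamaEmbeddings(model="llama3.1", base_url="http://localhost:11434")
vector = embedder.embed_query("hello ollama")
```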
OpenAI Embeddings
This component loads embedding models from OpenAI.
Use this component to generate embeddings using OpenAI’s models.
Ensure you have a valid OpenAI API key and sufficient quota.
Parameters
| Name | Display Name | Info |
|---|---|---|
| OpenAI API Key | API Key | The API key to use for accessing the OpenAI API |
| Default Headers | Default Headers | Default headers for the HTTP requests |
| Default Query | Default Query | Default query parameters for the HTTP requests |
| Allowed Special | Allowed Special Tokens | Special tokens allowed for processing |
| Disallowed Special | Disallowed Special Tokens | Special tokens disallowed for processing |
| Chunk Size | Chunk Size | Chunk size for processing |
| Client | HTTP Client | HTTP client for making requests |
| Deployment | Deployment | Deployment name for the model |
| Embedding Context Length | Context Length | Length of embedding context |
| Max Retries | Max Retries | Maximum number of retries for failed requests |
| Model | Model Name | Name of the model to use |
| Model Kwargs | Model Arguments | Additional keyword arguments for the model |
| OpenAI API Base | API Base URL | Base URL of the OpenAI API |
| OpenAI API Type | API Type | Type of the OpenAI API |
| OpenAI API Version | API Version | Version of the OpenAI API |
| OpenAI Organization | Organization | Organization associated with the API key |
| OpenAI Proxy | Proxy | Proxy server for the requests |
| Request Timeout | Request Timeout | Timeout for the HTTP requests |
| Show Progress Bar | Show Progress | Whether to show a progress bar for processing |
| Skip Empty | Skip Empty | Whether to skip empty inputs |
| TikToken Enable | Enable TikToken | Whether to enable TikToken |
| TikToken Model Name | TikToken Model | Name of the TikToken model |
Component code
OpenAI.py
```python
from langchain_openai.embeddings.base import OpenAIEmbeddings

from langflow.base.embeddings.model import LCEmbeddingsModel
from langflow.base.models.openai_constants import OPENAI_EMBEDDING_MODEL_NAMES
from langflow.field_typing import Embeddings
from langflow.io import BoolInput, DictInput, DropdownInput, FloatInput, IntInput, MessageTextInput, SecretStrInput


class OpenAIEmbeddingsComponent(LCEmbeddingsModel):
    display_name = "OpenAI Embeddings"
    description = "Generate embeddings using OpenAI models."
    icon = "OpenAI"
    name = "OpenAIEmbeddings"

    inputs = [
        DictInput(
            name="default_headers",
            display_name="Default Headers",
            advanced=True,
            info="Default headers to use for the API request.",
        ),
        DictInput(
            name="default_query",
            display_name="Default Query",
            advanced=True,
            info="Default query parameters to use for the API request.",
        ),
        IntInput(name="chunk_size", display_name="Chunk Size", advanced=True, value=1000),
        MessageTextInput(name="client", display_name="Client", advanced=True),
        MessageTextInput(name="deployment", display_name="Deployment", advanced=True),
        IntInput(name="embedding_ctx_length", display_name="Embedding Context Length", advanced=True, value=1536),
        IntInput(name="max_retries", display_name="Max Retries", value=3, advanced=True),
        DropdownInput(
            name="model",
            display_name="Model",
            advanced=False,
            options=OPENAI_EMBEDDING_MODEL_NAMES,
            value="text-embedding-3-small",
        ),
        DictInput(name="model_kwargs", display_name="Model Kwargs", advanced=True),
        SecretStrInput(name="openai_api_base", display_name="OpenAI API Base", advanced=True),
        SecretStrInput(name="openai_api_key", display_name="OpenAI API Key", value="OPENAI_API_KEY"),
        SecretStrInput(name="openai_api_type", display_name="OpenAI API Type", advanced=True),
        MessageTextInput(name="openai_api_version", display_name="OpenAI API Version", advanced=True),
        MessageTextInput(
            name="openai_organization",
            display_name="OpenAI Organization",
            advanced=True,
        ),
        MessageTextInput(name="openai_proxy", display_name="OpenAI Proxy", advanced=True),
        FloatInput(name="request_timeout", display_name="Request Timeout", advanced=True),
        BoolInput(name="show_progress_bar", display_name="Show Progress Bar", advanced=True),
        BoolInput(name="skip_empty", display_name="Skip Empty", advanced=True),
        MessageTextInput(
            name="tiktoken_model_name",
            display_name="TikToken Model Name",
            advanced=True,
        ),
        BoolInput(
            name="tiktoken_enable",
            display_name="TikToken Enable",
            advanced=True,
            value=True,
            info="If False, you must have transformers installed.",
        ),
        IntInput(
            name="dimensions",
            display_name="Dimensions",
            info="The number of dimensions the resulting output embeddings should have. Only supported by certain models.",
            advanced=True,
        ),
    ]

    def build_embeddings(self) -> Embeddings:
        return OpenAIEmbeddings(
            tiktoken_enabled=self.tiktoken_enable,
            default_headers=self.default_headers,
            default_query=self.default_query,
            allowed_special="all",
            disallowed_special="all",
            chunk_size=self.chunk_size,
            deployment=self.deployment,
            embedding_ctx_length=self.embedding_ctx_length,
            max_retries=self.max_retries,
            model=self.model,
            model_kwargs=self.model_kwargs,
            base_url=self.openai_api_base,
            api_key=self.openai_api_key,
            openai_api_type=self.openai_api_type,
            api_version=self.openai_api_version,
            organization=self.openai_organization,
            openai_proxy=self.openai_proxy,
            timeout=self.request_timeout or None,
            show_progress_bar=self.show_progress_bar,
            skip_empty=self.skip_empty,
            tiktoken_model_name=self.tiktoken_model_name,
            dimensions=self.dimensions or None,
        )
```
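One parameter worth a concrete note is `dimensions`: newer OpenAI embedding models such as text-embedding-3-small can return shortened vectors. A hedged sketch with a placeholder key:

```python
from langchain_openai.embeddings.base import OpenAIEmbeddings

embedder = OpenAIEmbeddings(
    api_key="<your-openai-api-key>",  # placeholder
    model="text-embedding-3-small",
    dimensions=256,  # request 256-dim vectors instead of the model default
)
vector = embedder.embed_query("hello openai")
assert len(vector) == 256
```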
VertexAI Embeddings
This component wraps the Google Vertex AI Embeddings API.
Use this component to generate embeddings using Google’s Vertex AI service.
Ensure you have the necessary Google Cloud credentials and permissions.
Parameters
| Name | Display Name | Info |
|---|---|---|
| credentials | Credentials | The default custom credentials to use |
| location | Location | The default location to use when making API calls |
| max_output_tokens | Max Output Tokens | Token limit for text output from one prompt |
| model_name | Model Name | The name of the Vertex AI large language model |
| project | Project | The default GCP project to use when making Vertex API calls |
| request_parallelism | Request Parallelism | The amount of parallelism allowed for requests |
| temperature | Temperature | Tunes the degree of randomness in text generations |
| top_k | Top K | How the model selects tokens for output |
| top_p | Top P | Probability threshold for token selection |
| tuned_model_name | Tuned Model Name | The name of a tuned model (overrides model_name if provided) |
| verbose | Verbose | Controls the level of detail in the output |
Component code
VertexAI.py
```python
from langflow.base.models.model import LCModelComponent
from langflow.field_typing import Embeddings
from langflow.io import BoolInput, FileInput, FloatInput, IntInput, MessageTextInput, Output


class VertexAIEmbeddingsComponent(LCModelComponent):
    display_name = "VertexAI Embeddings"
    description = "Generate embeddings using Google Cloud VertexAI models."
    icon = "VertexAI"
    name = "VertexAIEmbeddings"

    inputs = [
        FileInput(
            name="credentials",
            display_name="Credentials",
            info="JSON credentials file. Leave empty to fallback to environment variables",
            value="",
            file_types=["json"],
        ),
        MessageTextInput(name="location", display_name="Location", value="us-central1", advanced=True),
        MessageTextInput(name="project", display_name="Project", info="The project ID.", advanced=True),
        IntInput(name="max_output_tokens", display_name="Max Output Tokens", advanced=True),
        IntInput(name="max_retries", display_name="Max Retries", value=1, advanced=True),
        MessageTextInput(name="model_name", display_name="Model Name", value="textembedding-gecko"),
        IntInput(name="n", display_name="N", value=1, advanced=True),
        IntInput(name="request_parallelism", value=5, display_name="Request Parallelism", advanced=True),
        MessageTextInput(name="stop_sequences", display_name="Stop", advanced=True, is_list=True),
        BoolInput(name="streaming", display_name="Streaming", value=False, advanced=True),
        FloatInput(name="temperature", value=0.0, display_name="Temperature"),
        IntInput(name="top_k", display_name="Top K", advanced=True),
        FloatInput(name="top_p", display_name="Top P", value=0.95, advanced=True),
    ]

    outputs = [
        Output(display_name="Embeddings", name="embeddings", method="build_embeddings"),
    ]

    def build_embeddings(self) -> Embeddings:
        try:
            from langchain_google_vertexai import VertexAIEmbeddings
        except ImportError as e:
            raise ImportError(
                "Please install the langchain-google-vertexai package to use the VertexAIEmbeddings component."
            ) from e

        from google.oauth2 import service_account

        if self.credentials:
            gcloud_credentials = service_account.Credentials.from_service_account_file(self.credentials)
        else:
            # Fall back to environment variables or credentials inferred from the gcloud CLI.
            gcloud_credentials = None
        return VertexAIEmbeddings(
            credentials=gcloud_credentials,
            location=self.location,
            max_output_tokens=self.max_output_tokens or None,
            max_retries=self.max_retries,
            model_name=self.model_name,
            n=self.n,
            project=self.project,
            request_parallelism=self.request_parallelism,
            stop=self.stop_sequences or None,
            streaming=self.streaming,
            temperature=self.temperature,
            top_k=self.top_k or None,
            top_p=self.top_p,
        )
```
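A hedged standalone sketch; it assumes Application Default Credentials are configured (for example via `gcloud auth application-default login`), which matches the component's fallback path when no credentials file is supplied. The project ID is a placeholder:

```python
from langchain_google_vertexai import VertexAIEmbeddings

embedder = VertexAIEmbeddings(
    model_name="textembedding-gecko",
    project="<your-gcp-project-id>",  # placeholder
    location="us-central1",
)
vector = embedder.embed_query("hello vertex")
```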