Models
Model components generate text using language models. Use these components in your flows for tasks such as chatbots, content generation, and summarization.
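Every component below builds and returns a LangChain chat model (a `LanguageModel`), so the resulting object behaves like any other LangChain runnable. A minimal sketch, assuming `langchain-openai` is installed and `OPENAI_API_KEY` is set (the model name is illustrative):

```python
from langchain_openai import ChatOpenAI

# Any LanguageModel built by these components is a LangChain runnable,
# so it supports invoke(), stream(), and batch().
model = ChatOpenAI(model="gpt-4o-mini", temperature=0.1)

print(model.invoke("Say hello in five words.").content)

# Streaming yields chunks as they are generated.
for chunk in model.stream("Count to three."):
    print(chunk.content, end="", flush=True)
```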
AI/ML API
This component creates a ChatOpenAI model instance using the AIML API.
For more information, see the AIML documentation.
Parameters
| Name | Type | Description |
|---|---|---|
| max_tokens | Integer | The maximum number of tokens to generate. Set to 0 for unlimited tokens. Range: 0-128000. |
| model_kwargs | Dictionary | Additional keyword arguments for the model. |
| model_name | String | The name of the AIML model to use. Options are predefined in AIML_CHAT_MODELS. |
| aiml_api_base | String | The base URL of the AIML API. Defaults to https://api.aimlapi.com. |
| api_key | SecretString | The AIML API Key to use for the model. |
| temperature | Float | Controls randomness in the output. Default: 0.1. |
| seed | Integer | Controls reproducibility of the job. |

| Name | Type | Description |
|---|---|---|
| model | LanguageModel | An instance of ChatOpenAI configured with the specified parameters. |
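Because AIML's API is OpenAI-compatible, the component only swaps the base URL on `ChatOpenAI`. A sketch mirroring `build_model()` with its defaults; the model name is a placeholder, pick one from `AIML_CHAT_MODELS`:

```python
import os

from langchain_openai import ChatOpenAI

# Mirrors the component's build_model() defaults; AIML exposes an
# OpenAI-compatible API, so only the base URL changes.
model = ChatOpenAI(
    model="gpt-4o",  # placeholder; use a name from AIML_CHAT_MODELS
    api_key=os.environ["AIML_API_KEY"],
    base_url="https://api.aimlapi.com",
    temperature=0.1,
    seed=1,
)
print(model.invoke("Hello!").content)
```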
Component code
AIML.py
from langflow.field_typing.range_spec import RangeSpec
from langchain_openai import ChatOpenAI
from pydantic.v1 import SecretStr
from langflow.base.models.aiml_constants import AIML_CHAT_MODELS
from langflow.base.models.model import LCModelComponent
from langflow.field_typing import LanguageModel
from langflow.inputs import (
DictInput,
DropdownInput,
FloatInput,
IntInput,
SecretStrInput,
StrInput,
)
class AIMLModelComponent(LCModelComponent):
display_name = "AIML"
description = "Generates text using AIML LLMs."
icon = "AIML"
name = "AIMLModel"
documentation = "https://docs.aimlapi.com/api-reference"
inputs = LCModelComponent._base_inputs + [
IntInput(
name="max_tokens",
display_name="Max Tokens",
advanced=True,
info="The maximum number of tokens to generate. Set to 0 for unlimited tokens.",
range_spec=RangeSpec(min=0, max=128000),
),
DictInput(name="model_kwargs", display_name="Model Kwargs", advanced=True),
DropdownInput(
name="model_name",
display_name="Model Name",
advanced=False,
options=AIML_CHAT_MODELS,
value=AIML_CHAT_MODELS[0],
),
StrInput(
name="aiml_api_base",
display_name="AIML API Base",
advanced=True,
info="The base URL of the OpenAI API. Defaults to https://api.aimlapi.com . You can change this to use other APIs like JinaChat, LocalAI e Prem.",
),
SecretStrInput(
name="api_key",
display_name="AIML API Key",
info="The AIML API Key to use for the OpenAI model.",
advanced=False,
value="AIML_API_KEY",
),
FloatInput(name="temperature", display_name="Temperature", value=0.1),
IntInput(
name="seed",
display_name="Seed",
info="The seed controls the reproducibility of the job.",
advanced=True,
value=1,
),
]
def build_model(self) -> LanguageModel: # type: ignore[type-var]
aiml_api_key = self.api_key
temperature = self.temperature
model_name: str = self.model_name
max_tokens = self.max_tokens
model_kwargs = self.model_kwargs or {}
aiml_api_base = self.aiml_api_base or "https://api.aimlapi.com"
seed = self.seed
if isinstance(aiml_api_key, SecretStr):
openai_api_key = aiml_api_key.get_secret_value()
else:
openai_api_key = aiml_api_key
model = ChatOpenAI(
model=model_name,
temperature=temperature,
api_key=openai_api_key,
base_url=aiml_api_base,
max_tokens=max_tokens or None,
seed=seed,
**model_kwargs,
)
return model # type: ignore
    def _get_exception_message(self, e: Exception):
        """
        Get a message from an OpenAI exception.
        Args:
            e (Exception): The exception to get the message from.
        Returns:
            str: The message from the exception.
        """
        try:
            from openai import BadRequestError
        except ImportError:
            return None
        if isinstance(e, BadRequestError):
            message = e.body.get("message")  # type: ignore
            if message:
                return message
        return None
Amazon Bedrock
This component generates text using Amazon Bedrock LLMs.
For more information, see the Amazon Bedrock documentation.
Parameters
| Name | Type | Description |
|---|---|---|
| model_id | String | The ID of the Amazon Bedrock model to use. Options include various models from Amazon, Anthropic, AI21, Cohere, Meta, Mistral, and Stability AI. |
| aws_access_key | SecretString | AWS Access Key for authentication. |
| aws_secret_key | SecretString | AWS Secret Key for authentication. |
| credentials_profile_name | String | Name of the AWS credentials profile to use (advanced). |
| region_name | String | AWS region name. Default: "us-east-1". |
| model_kwargs | Dictionary | Additional keyword arguments for the model (advanced). |
| endpoint_url | String | Custom endpoint URL for the Bedrock service (advanced). |

| Name | Type | Description |
|---|---|---|
| model | LanguageModel | An instance of ChatBedrock configured with the specified parameters. |
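The component authenticates through a `boto3` session and hands its `bedrock-runtime` client to `ChatBedrock`. A sketch of the same flow, assuming AWS credentials are available in your environment or a profile:

```python
import boto3
from langchain_aws import ChatBedrock

# Mirrors the component: create a boto3 session, then pass its
# bedrock-runtime client to ChatBedrock.
session = boto3.Session(region_name="us-east-1")  # or pass explicit keys
client = session.client("bedrock-runtime")

model = ChatBedrock(
    client=client,
    model_id="anthropic.claude-3-haiku-20240307-v1:0",
    region_name="us-east-1",
)
print(model.invoke("Hello from Bedrock!").content)
```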
Component code
AmazonBedrock.py
from langflow.base.models.model import LCModelComponent
from langflow.field_typing import LanguageModel
from langflow.inputs import MessageTextInput, SecretStrInput
from langflow.io import DictInput, DropdownInput
class AmazonBedrockComponent(LCModelComponent):
display_name: str = "Amazon Bedrock"
description: str = "Generate text using Amazon Bedrock LLMs."
icon = "Amazon"
name = "AmazonBedrockModel"
inputs = LCModelComponent._base_inputs + [
DropdownInput(
name="model_id",
display_name="Model ID",
options=[
"amazon.titan-text-express-v1",
"amazon.titan-text-lite-v1",
"amazon.titan-text-premier-v1:0",
"amazon.titan-embed-text-v1",
"amazon.titan-embed-text-v2:0",
"amazon.titan-embed-image-v1",
"amazon.titan-image-generator-v1",
"anthropic.claude-v2",
"anthropic.claude-v2:1",
"anthropic.claude-3-sonnet-20240229-v1:0",
"anthropic.claude-3-haiku-20240307-v1:0",
"anthropic.claude-3-opus-20240229-v1:0",
"anthropic.claude-instant-v1",
"ai21.j2-mid-v1",
"ai21.j2-ultra-v1",
"cohere.command-text-v14",
"cohere.command-light-text-v14",
"cohere.command-r-v1:0",
"cohere.command-r-plus-v1:0",
"cohere.embed-english-v3",
"cohere.embed-multilingual-v3",
"meta.llama2-13b-chat-v1",
"meta.llama2-70b-chat-v1",
"meta.llama3-8b-instruct-v1:0",
"meta.llama3-70b-instruct-v1:0",
"mistral.mistral-7b-instruct-v0:2",
"mistral.mixtral-8x7b-instruct-v0:1",
"mistral.mistral-large-2402-v1:0",
"mistral.mistral-small-2402-v1:0",
"stability.stable-diffusion-xl-v0",
"stability.stable-diffusion-xl-v1",
],
value="anthropic.claude-3-haiku-20240307-v1:0",
),
SecretStrInput(name="aws_access_key", display_name="Access Key"),
SecretStrInput(name="aws_secret_key", display_name="Secret Key"),
MessageTextInput(name="credentials_profile_name", display_name="Credentials Profile Name", advanced=True),
MessageTextInput(name="region_name", display_name="Region Name", value="us-east-1"),
DictInput(name="model_kwargs", display_name="Model Kwargs", advanced=True, is_list=True),
MessageTextInput(name="endpoint_url", display_name="Endpoint URL", advanced=True),
]
def build_model(self) -> LanguageModel: # type: ignore[type-var]
try:
from langchain_aws import ChatBedrock
except ImportError:
raise ImportError("langchain_aws is not installed. Please install it with `pip install langchain_aws`.")
if self.aws_access_key:
import boto3 # type: ignore
session = boto3.Session(
aws_access_key_id=self.aws_access_key,
aws_secret_access_key=self.aws_secret_key,
)
elif self.credentials_profile_name:
import boto3
session = boto3.Session(profile_name=self.credentials_profile_name)
else:
import boto3
session = boto3.Session()
client_params = {}
if self.endpoint_url:
client_params["endpoint_url"] = self.endpoint_url
if self.region_name:
client_params["region_name"] = self.region_name
boto3_client = session.client("bedrock-runtime", **client_params)
try:
output = ChatBedrock( # type: ignore
client=boto3_client,
model_id=self.model_id,
region_name=self.region_name,
model_kwargs=self.model_kwargs,
endpoint_url=self.endpoint_url,
streaming=self.stream,
)
except Exception as e:
raise ValueError("Could not connect to AmazonBedrock API.") from e
return output # type: ignore
Anthropic
This component generates text using Anthropic chat models.
For more information, see the Anthropic documentation.
Parameters
| Name | Type | Description |
|---|---|---|
| max_tokens | Integer | The maximum number of tokens to generate. Set to 0 for unlimited tokens. Default: 4096. |
| model | String | The name of the Anthropic model to use. Options include various Claude 3 models. |
| anthropic_api_key | SecretString | Your Anthropic API key for authentication. |
| temperature | Float | Controls randomness in the output. Default: 0.1. |
| anthropic_api_url | String | Endpoint of the Anthropic API. Defaults to 'https://api.anthropic.com' if not specified (advanced). |
| prefill | String | Prefill text to guide the model's response (advanced). |

| Name | Type | Description |
|---|---|---|
| model | LanguageModel | An instance of ChatAnthropic configured with the specified parameters. |
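The `prefill` parameter relies on Anthropic's assistant-prefill behavior: text supplied as the start of the assistant's turn constrains how the reply continues. A sketch of the idea with `ChatAnthropic` directly, assuming `ANTHROPIC_API_KEY` is set (the component applies prefill in its base class, not in the code shown below):

```python
from langchain_anthropic.chat_models import ChatAnthropic
from langchain_core.messages import AIMessage, HumanMessage

model = ChatAnthropic(model="claude-3-5-sonnet-20240620", temperature=0.1)

# Anthropic accepts a partial assistant turn as the last message; the
# model continues from it. Starting with "[" nudges it toward a JSON array.
messages = [
    HumanMessage(content="List three primary colors as a JSON array."),
    AIMessage(content="["),  # prefill text
]
print(model.invoke(messages).content)
```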
Component code
Anthropic.py
from pydantic.v1 import SecretStr
from langflow.base.models.model import LCModelComponent
from langflow.field_typing import LanguageModel
from langflow.io import DropdownInput, FloatInput, IntInput, MessageTextInput, SecretStrInput
class AnthropicModelComponent(LCModelComponent):
display_name = "Anthropic"
description = "Generate text using Anthropic Chat&Completion LLMs with prefill support."
icon = "Anthropic"
name = "AnthropicModel"
inputs = LCModelComponent._base_inputs + [
IntInput(
name="max_tokens",
display_name="Max Tokens",
advanced=True,
value=4096,
info="The maximum number of tokens to generate. Set to 0 for unlimited tokens.",
),
DropdownInput(
name="model",
display_name="Model Name",
options=[
"claude-3-5-sonnet-20240620",
"claude-3-opus-20240229",
"claude-3-sonnet-20240229",
"claude-3-haiku-20240307",
],
info="https://python.langchain.com/docs/integrations/chat/anthropic",
value="claude-3-5-sonnet-20240620",
),
SecretStrInput(
name="anthropic_api_key",
display_name="Anthropic API Key",
info="Your Anthropic API key.",
),
FloatInput(name="temperature", display_name="Temperature", value=0.1),
MessageTextInput(
name="anthropic_api_url",
display_name="Anthropic API URL",
advanced=True,
info="Endpoint of the Anthropic API. Defaults to 'https://api.anthropic.com' if not specified.",
),
MessageTextInput(
name="prefill",
display_name="Prefill",
info="Prefill text to guide the model's response.",
advanced=True,
),
]
def build_model(self) -> LanguageModel: # type: ignore[type-var]
try:
from langchain_anthropic.chat_models import ChatAnthropic
except ImportError:
raise ImportError(
"langchain_anthropic is not installed. Please install it with `pip install langchain_anthropic`."
)
model = self.model
anthropic_api_key = self.anthropic_api_key
max_tokens = self.max_tokens
temperature = self.temperature
anthropic_api_url = self.anthropic_api_url or "https://api.anthropic.com"
try:
output = ChatAnthropic(
model=model,
anthropic_api_key=(SecretStr(anthropic_api_key) if anthropic_api_key else None),
max_tokens_to_sample=max_tokens, # type: ignore
temperature=temperature,
anthropic_api_url=anthropic_api_url,
streaming=self.stream,
)
except Exception as e:
raise ValueError("Could not connect to Anthropic API.") from e
return output # type: ignore
def _get_exception_message(self, exception: Exception) -> str | None:
"""
Get a message from an Anthropic exception.
Args:
exception (Exception): The exception to get the message from.
Returns:
str: The message from the exception.
"""
try:
from anthropic import BadRequestError
except ImportError:
return None
if isinstance(exception, BadRequestError):
message = exception.body.get("error", {}).get("message") # type: ignore
if message:
return message
return None
Azure OpenAI
This component generates text using Azure OpenAI LLMs.
For more information, see the Azure OpenAI documentation.
Parameters
| Name | Display Name | Info |
|---|---|---|
| Model Name | Model Name | Specifies the name of the Azure OpenAI model to be used for text generation. |
| Azure Endpoint | Azure Endpoint | Your Azure endpoint, including the resource. |
| Deployment Name | Deployment Name | Specifies the name of the deployment. |
| API Version | API Version | Specifies the version of the Azure OpenAI API to be used. |
| API Key | API Key | Your Azure OpenAI API key. |
| Temperature | Temperature | Specifies the sampling temperature. Defaults to 0.7. |
| Max Tokens | Max Tokens | Specifies the maximum number of tokens to generate. Set to 0 for unlimited tokens. |
| Input Value | Input Value | Specifies the input text for text generation. |
| Stream | Stream | Specifies whether to stream the response from the model. Defaults to False. |
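A sketch of the equivalent direct call to `AzureChatOpenAI`, using the component's defaults; the endpoint and deployment names are placeholders for your own Azure resource:

```python
from langchain_openai import AzureChatOpenAI

# Equivalent of the component's build_model() call; the endpoint and
# deployment below are placeholders for your own Azure resource.
model = AzureChatOpenAI(
    azure_endpoint="https://example-resource.azure.openai.com/",
    azure_deployment="my-deployment",  # placeholder
    api_version="2024-05-13",
    api_key="...",  # or set AZURE_OPENAI_API_KEY
    temperature=0.7,
)
print(model.invoke("Hello!").content)
```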
Component code
AzureOpenAI.py
from langchain_openai import AzureChatOpenAI
from langflow.base.models.model import LCModelComponent
from langflow.field_typing import LanguageModel
from langflow.inputs import MessageTextInput
from langflow.io import DropdownInput, FloatInput, IntInput, SecretStrInput
class AzureChatOpenAIComponent(LCModelComponent):
display_name: str = "Azure OpenAI"
description: str = "Generate text using Azure OpenAI LLMs."
documentation: str = "https://python.langchain.com/docs/integrations/llms/azure_openai"
beta = False
icon = "Azure"
name = "AzureOpenAIModel"
AZURE_OPENAI_API_VERSIONS = [
"2023-03-15-preview",
"2023-05-15",
"2023-06-01-preview",
"2023-07-01-preview",
"2023-08-01-preview",
"2023-09-01-preview",
"2023-12-01-preview",
"2024-04-09",
"2024-05-13",
]
inputs = LCModelComponent._base_inputs + [
MessageTextInput(
name="azure_endpoint",
display_name="Azure Endpoint",
info="Your Azure endpoint, including the resource. Example: `https://example-resource.azure.openai.com/`",
required=True,
),
MessageTextInput(name="azure_deployment", display_name="Deployment Name", required=True),
SecretStrInput(name="api_key", display_name="API Key"),
DropdownInput(
name="api_version",
display_name="API Version",
options=AZURE_OPENAI_API_VERSIONS,
value=AZURE_OPENAI_API_VERSIONS[-1],
),
FloatInput(name="temperature", display_name="Temperature", value=0.7),
IntInput(
name="max_tokens",
display_name="Max Tokens",
advanced=True,
info="The maximum number of tokens to generate. Set to 0 for unlimited tokens.",
),
]
def build_model(self) -> LanguageModel: # type: ignore[type-var]
azure_endpoint = self.azure_endpoint
azure_deployment = self.azure_deployment
api_version = self.api_version
api_key = self.api_key
temperature = self.temperature
max_tokens = self.max_tokens
stream = self.stream
try:
output = AzureChatOpenAI(
azure_endpoint=azure_endpoint,
azure_deployment=azure_deployment,
api_version=api_version,
api_key=api_key,
temperature=temperature,
max_tokens=max_tokens or None,
streaming=stream,
)
except Exception as e:
raise ValueError(f"Could not connect to AzureOpenAI API: {str(e)}") from e
return output # type: ignore
Cohere
This component generates text using Cohere’s language models.
For more information, see the Cohere documentation.
Parameters
| Name | Display Name | Info |
|---|---|---|
| Cohere API Key | Cohere API Key | Your Cohere API key. |
| Max Tokens | Max Tokens | Specifies the maximum number of tokens to generate. |
| Temperature | Temperature | Specifies the sampling temperature. Defaults to 0.75. |
| Input Value | Input Value | Specifies the input text for text generation. |
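A sketch of the same construction the component performs, assuming `COHERE_API_KEY` is set in your environment:

```python
import os

from langchain_cohere import ChatCohere
from pydantic.v1 import SecretStr

# Same construction the component performs, with its 0.75 default.
model = ChatCohere(
    temperature=0.75,
    cohere_api_key=SecretStr(os.environ["COHERE_API_KEY"]),
)
print(model.invoke("Hello!").content)
```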
Component code
Cohere.py
from langchain_cohere import ChatCohere
from pydantic.v1 import SecretStr
from langflow.base.models.model import LCModelComponent
from langflow.field_typing import LanguageModel
from langflow.io import FloatInput, SecretStrInput
class CohereComponent(LCModelComponent):
display_name = "Cohere"
description = "Generate text using Cohere LLMs."
documentation = "https://python.langchain.com/docs/modules/model_io/models/llms/integrations/cohere"
icon = "Cohere"
name = "CohereModel"
inputs = LCModelComponent._base_inputs + [
SecretStrInput(
name="cohere_api_key",
display_name="Cohere API Key",
info="The Cohere API Key to use for the Cohere model.",
advanced=False,
value="COHERE_API_KEY",
),
FloatInput(name="temperature", display_name="Temperature", value=0.75),
]
def build_model(self) -> LanguageModel: # type: ignore[type-var]
cohere_api_key = self.cohere_api_key
temperature = self.temperature
if cohere_api_key:
api_key = SecretStr(cohere_api_key)
else:
api_key = None
output = ChatCohere(
temperature=temperature or 0.75,
cohere_api_key=api_key,
)
return output # type: ignore
Google Generative AI
This component generates text using Google’s Generative AI models.
For more information, see the Google Generative AI documentation.
Parameters
| Name | Display Name | Info |
|---|---|---|
| Google API Key | Google API Key | Your Google API key to use for the Google Generative AI. |
| Model | Model | The name of the model to use, such as "gemini-1.5-pro". |
| Max Output Tokens | Max Output Tokens | The maximum number of tokens to generate. |
| Temperature | Temperature | Run inference with this temperature. |
| Top K | Top K | Consider the set of top K most probable tokens. |
| Top P | Top P | The maximum cumulative probability of tokens to consider when sampling. |
| N | N | Number of chat completions to generate for each prompt. |
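A sketch of the equivalent `ChatGoogleGenerativeAI` call with the component's defaults, assuming `GOOGLE_API_KEY` is set:

```python
import os

from langchain_google_genai import ChatGoogleGenerativeAI
from pydantic.v1 import SecretStr

# Mirrors the component's defaults; only the API key is required.
model = ChatGoogleGenerativeAI(
    model="gemini-1.5-pro",
    temperature=0.1,
    google_api_key=SecretStr(os.environ["GOOGLE_API_KEY"]),
)
print(model.invoke("Hello!").content)
```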
Component code
GoogleGenerativeAI.py
from pydantic.v1 import SecretStr
from langflow.base.models.model import LCModelComponent
from langflow.field_typing import LanguageModel
from langflow.inputs import DropdownInput, FloatInput, IntInput, SecretStrInput
class GoogleGenerativeAIComponent(LCModelComponent):
display_name = "Google Generative AI"
description = "Generate text using Google Generative AI."
icon = "GoogleGenerativeAI"
name = "GoogleGenerativeAIModel"
inputs = LCModelComponent._base_inputs + [
IntInput(
name="max_output_tokens",
display_name="Max Output Tokens",
info="The maximum number of tokens to generate.",
),
DropdownInput(
name="model",
display_name="Model",
info="The name of the model to use.",
options=["gemini-1.5-pro", "gemini-1.5-flash", "gemini-1.0-pro", "gemini-1.0-pro-vision"],
value="gemini-1.5-pro",
),
SecretStrInput(
name="google_api_key",
display_name="Google API Key",
info="The Google API Key to use for the Google Generative AI.",
),
FloatInput(
name="top_p",
display_name="Top P",
info="The maximum cumulative probability of tokens to consider when sampling.",
advanced=True,
),
FloatInput(name="temperature", display_name="Temperature", value=0.1),
IntInput(
name="n",
display_name="N",
info="Number of chat completions to generate for each prompt. Note that the API may not return the full n completions if duplicates are generated.",
advanced=True,
),
IntInput(
name="top_k",
display_name="Top K",
info="Decode using top-k sampling: consider the set of top_k most probable tokens. Must be positive.",
advanced=True,
),
]
def build_model(self) -> LanguageModel: # type: ignore[type-var]
try:
from langchain_google_genai import ChatGoogleGenerativeAI
except ImportError:
raise ImportError("The 'langchain_google_genai' package is required to use the Google Generative AI model.")
google_api_key = self.google_api_key
model = self.model
max_output_tokens = self.max_output_tokens
temperature = self.temperature
top_k = self.top_k
top_p = self.top_p
n = self.n
output = ChatGoogleGenerativeAI( # type: ignore
model=model,
max_output_tokens=max_output_tokens or None,
temperature=temperature,
top_k=top_k or None,
top_p=top_p or None,
n=n or 1,
google_api_key=SecretStr(google_api_key),
)
return output # type: ignore
Groq
This component generates text using Groq’s language models.
For more information, see the Groq documentation.
Parameters
| Name | Type | Description |
|---|---|---|
| groq_api_key | SecretString | API key for the Groq API. |
| groq_api_base | String | Base URL path for API requests. Default: "https://api.groq.com" (advanced). |
| max_tokens | Integer | The maximum number of tokens to generate (advanced). |
| temperature | Float | Controls randomness in the output. Range: [0.0, 1.0]. Default: 0.1. |
| n | Integer | Number of chat completions to generate for each prompt (advanced). |
| model_name | String | The name of the Groq model to use. Options are dynamically fetched from the Groq API. |

| Name | Type | Description |
|---|---|---|
| model | LanguageModel | An instance of ChatGroq configured with the specified parameters. |
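The model dropdown is populated from Groq's OpenAI-compatible model listing, as in the component's `get_models()` below. A sketch of the same request, assuming `GROQ_API_KEY` is set:

```python
import os

import requests

# Same request the component's get_models() makes against Groq's
# OpenAI-compatible API to populate the model dropdown.
resp = requests.get(
    "https://api.groq.com/openai/v1/models",
    headers={"Authorization": f"Bearer {os.environ['GROQ_API_KEY']}"},
    timeout=10,
)
resp.raise_for_status()
print([m["id"] for m in resp.json().get("data", [])])
```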
Component code
Groq.py
import requests
from typing import List
from langchain_groq import ChatGroq
from pydantic.v1 import SecretStr
from langflow.base.models.model import LCModelComponent
from langflow.field_typing import LanguageModel
from langflow.io import DropdownInput, FloatInput, IntInput, MessageTextInput, SecretStrInput
class GroqModel(LCModelComponent):
display_name: str = "Groq"
description: str = "Generate text using Groq."
icon = "Groq"
name = "GroqModel"
inputs = LCModelComponent._base_inputs + [
SecretStrInput(
name="groq_api_key",
display_name="Groq API Key",
info="API key for the Groq API.",
),
MessageTextInput(
name="groq_api_base",
display_name="Groq API Base",
info="Base URL path for API requests, leave blank if not using a proxy or service emulator.",
advanced=True,
value="https://api.groq.com",
),
IntInput(
name="max_tokens",
display_name="Max Output Tokens",
info="The maximum number of tokens to generate.",
advanced=True,
),
FloatInput(
name="temperature",
display_name="Temperature",
info="Run inference with this temperature. Must by in the closed interval [0.0, 1.0].",
value=0.1,
),
IntInput(
name="n",
display_name="N",
info="Number of chat completions to generate for each prompt. Note that the API may not return the full n completions if duplicates are generated.",
advanced=True,
),
DropdownInput(
name="model_name",
display_name="Model",
info="The name of the model to use.",
options=[],
refresh_button=True,
),
]
def get_models(self) -> List[str]:
api_key = self.groq_api_key
base_url = self.groq_api_base or "https://api.groq.com"
url = f"{base_url}/openai/v1/models"
headers = {"Authorization": f"Bearer {api_key}", "Content-Type": "application/json"}
try:
response = requests.get(url, headers=headers)
response.raise_for_status()
model_list = response.json()
return [model["id"] for model in model_list.get("data", [])]
except requests.RequestException as e:
self.status = f"Error fetching models: {str(e)}"
return []
def update_build_config(self, build_config: dict, field_value: str, field_name: str | None = None):
if field_name == "groq_api_key" or field_name == "groq_api_base" or field_name == "model_name":
models = self.get_models()
build_config["model_name"]["options"] = models
return build_config
def build_model(self) -> LanguageModel: # type: ignore[type-var]
groq_api_key = self.groq_api_key
model_name = self.model_name
max_tokens = self.max_tokens
temperature = self.temperature
groq_api_base = self.groq_api_base
n = self.n
stream = self.stream
output = ChatGroq( # type: ignore
model=model_name,
max_tokens=max_tokens or None,
temperature=temperature,
base_url=groq_api_base,
n=n or 1,
api_key=SecretStr(groq_api_key),
streaming=stream,
)
return output # type: ignore
Hugging Face API
This component generates text using Hugging Face’s language models.
For more information, see the Hugging Face documentation.
Parameters
| Name | Display Name | Info |
|---|---|---|
| Endpoint URL | Endpoint URL | The URL of the Hugging Face Inference API endpoint. |
| Task | Task | Specifies the task for text generation. |
| API Token | API Token | The API token required for authentication. |
| Model Kwargs | Model Kwargs | Additional keyword arguments for the model. |
| Input Value | Input Value | The input text for text generation. |
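The component derives the Inference API endpoint URL from the model ID and wraps endpoint creation in retries. A sketch of the underlying `HuggingFaceEndpoint` with the component's defaults; the API token is a placeholder:

```python
from langchain_community.llms.huggingface_endpoint import HuggingFaceEndpoint

# The component builds this URL from the model ID, so this matches its
# defaults (openai-community/gpt2, text-generation).
llm = HuggingFaceEndpoint(
    endpoint_url="https://api-inference.huggingface.co/models/openai-community/gpt2",
    task="text-generation",
    huggingfacehub_api_token="hf_...",  # placeholder
)
print(llm.invoke("Once upon a time"))
```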
Component code
HuggingFace.py
from tenacity import retry, stop_after_attempt, wait_fixed
from langchain_community.llms.huggingface_endpoint import HuggingFaceEndpoint
from langflow.base.models.model import LCModelComponent
from langflow.field_typing import LanguageModel
from langflow.io import DictInput, DropdownInput, SecretStrInput, StrInput, IntInput
class HuggingFaceEndpointsComponent(LCModelComponent):
display_name: str = "HuggingFace"
description: str = "Generate text using Hugging Face Inference APIs."
icon = "HuggingFace"
name = "HuggingFaceModel"
inputs = LCModelComponent._base_inputs + [
StrInput(
name="model_id",
display_name="Model ID",
value="openai-community/gpt2",
),
DropdownInput(
name="task",
display_name="Task",
options=["text2text-generation", "text-generation", "summarization", "translation"],
value="text-generation",
),
SecretStrInput(name="huggingfacehub_api_token", display_name="API Token", password=True),
DictInput(name="model_kwargs", display_name="Model Keyword Arguments", advanced=True),
IntInput(name="retry_attempts", display_name="Retry Attempts", value=1, advanced=True),
]
def create_huggingface_endpoint(
self, model_id: str, task: str, huggingfacehub_api_token: str, model_kwargs: dict
) -> HuggingFaceEndpoint:
retry_attempts = self.retry_attempts # Access the retry attempts input
endpoint_url = f"https://api-inference.huggingface.co/models/{model_id}"
@retry(stop=stop_after_attempt(retry_attempts), wait=wait_fixed(2))
def _attempt_create():
return HuggingFaceEndpoint(
endpoint_url=endpoint_url,
task=task,
huggingfacehub_api_token=huggingfacehub_api_token,
model_kwargs=model_kwargs,
)
return _attempt_create()
def build_model(self) -> LanguageModel: # type: ignore[type-var]
model_id = self.model_id
task = self.task
huggingfacehub_api_token = self.huggingfacehub_api_token
model_kwargs = self.model_kwargs or {}
try:
llm = self.create_huggingface_endpoint(model_id, task, huggingfacehub_api_token, model_kwargs)
except Exception as e:
raise ValueError("Could not connect to HuggingFace Endpoints API.") from e
return llm
Maritalk
This component generates text using Maritalk LLMs.
For more information, see the Maritalk documentation.
Parameters
| Name | Type | Description |
|---|---|---|
| max_tokens | Integer | The maximum number of tokens to generate. Set to 0 for unlimited tokens. Default: 512. |
| model_name | String | The name of the Maritalk model to use. Options: "sabia-2-small", "sabia-2-medium". Default: "sabia-2-small". |
| api_key | SecretString | The Maritalk API Key to use for authentication. |
| temperature | Float | Controls randomness in the output. Range: [0, 1]. Default: 0.1. |

| Name | Type | Description |
|---|---|---|
| model | LanguageModel | An instance of ChatMaritalk configured with the specified parameters. |
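A sketch of the equivalent `ChatMaritalk` call with the component's defaults; the API key is a placeholder:

```python
from langchain_community.chat_models import ChatMaritalk

# Same arguments the component passes; the API key is a placeholder.
model = ChatMaritalk(
    model="sabia-2-small",
    api_key="...",  # your Maritalk API key
    temperature=0.1,
    max_tokens=512,
)
print(model.invoke("Olá!").content)
```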
Component code
Maritalk.py
from langchain_community.chat_models import ChatMaritalk
from langflow.base.models.model import LCModelComponent
from langflow.field_typing import LanguageModel
from langflow.field_typing.range_spec import RangeSpec
from langflow.inputs import DropdownInput, FloatInput, IntInput, SecretStrInput
class MaritalkModelComponent(LCModelComponent):
display_name = "Maritalk"
description = "Generates text using Maritalk LLMs."
icon = "Maritalk"
name = "Maritalk"
inputs = LCModelComponent._base_inputs + [
IntInput(
name="max_tokens",
display_name="Max Tokens",
advanced=True,
value=512,
info="The maximum number of tokens to generate. Set to 0 for unlimited tokens.",
),
DropdownInput(
name="model_name",
display_name="Model Name",
advanced=False,
options=["sabia-2-small", "sabia-2-medium"],
value=["sabia-2-small"],
),
SecretStrInput(
name="api_key",
display_name="Maritalk API Key",
info="The Maritalk API Key to use for the OpenAI model.",
advanced=False,
),
FloatInput(name="temperature", display_name="Temperature", value=0.1, range_spec=RangeSpec(min=0, max=1)),
]
def build_model(self) -> LanguageModel: # type: ignore[type-var]
api_key = self.api_key
temperature = self.temperature
model_name: str = self.model_name
max_tokens = self.max_tokens
output = ChatMaritalk(
max_tokens=max_tokens,
model=model_name,
api_key=api_key,
temperature=temperature or 0.1,
)
return output # type: ignore
Mistral Model
This component generates text using MistralAI LLMs.
For more information, see the Mistral AI documentation.
Parameters
| Name | Type | Description |
|---|---|---|
| max_tokens | Integer | The maximum number of tokens to generate. Set to 0 for unlimited tokens (advanced). |
| model_name | String | The name of the Mistral AI model to use. Options include "open-mixtral-8x7b", "open-mixtral-8x22b", "mistral-small-latest", "mistral-medium-latest", "mistral-large-latest", and "codestral-latest". Default: "codestral-latest". |
| mistral_api_base | String | The base URL of the Mistral API. Defaults to https://api.mistral.ai/v1 (advanced). |
| api_key | SecretString | The Mistral API Key to use for authentication. |
| temperature | Float | Controls randomness in the output. Default: 0.5. |
| max_retries | Integer | Maximum number of retries for API calls. Default: 5 (advanced). |
| timeout | Integer | Timeout for API calls in seconds. Default: 60 (advanced). |
| max_concurrent_requests | Integer | Maximum number of concurrent API requests. Default: 3 (advanced). |
| top_p | Float | Nucleus sampling parameter. Default: 1 (advanced). |
| random_seed | Integer | Seed for random number generation. Default: 1 (advanced). |
| safe_mode | Boolean | Enables safe mode for content generation (advanced). |

| Name | Type | Description |
|---|---|---|
| model | LanguageModel | An instance of ChatMistralAI configured with the specified parameters. |
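A sketch mirroring the component's `ChatMistralAI` construction and defaults, assuming `MISTRAL_API_KEY` is set:

```python
import os

from langchain_mistralai import ChatMistralAI
from pydantic.v1 import SecretStr

# Mirrors the component's construction and defaults.
model = ChatMistralAI(
    model_name="codestral-latest",
    api_key=SecretStr(os.environ["MISTRAL_API_KEY"]),
    temperature=0.5,
    max_retries=5,
    timeout=60,
)
print(model.invoke("Hello!").content)
```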
Component code
Mistral.py
from langchain_mistralai import ChatMistralAI
from pydantic.v1 import SecretStr
from langflow.base.models.model import LCModelComponent
from langflow.field_typing import LanguageModel
from langflow.io import BoolInput, DropdownInput, FloatInput, IntInput, SecretStrInput, StrInput
class MistralAIModelComponent(LCModelComponent):
display_name = "MistralAI"
description = "Generates text using MistralAI LLMs."
icon = "MistralAI"
name = "MistralModel"
inputs = LCModelComponent._base_inputs + [
IntInput(
name="max_tokens",
display_name="Max Tokens",
advanced=True,
info="The maximum number of tokens to generate. Set to 0 for unlimited tokens.",
),
DropdownInput(
name="model_name",
display_name="Model Name",
advanced=False,
options=[
"open-mixtral-8x7b",
"open-mixtral-8x22b",
"mistral-small-latest",
"mistral-medium-latest",
"mistral-large-latest",
"codestral-latest",
],
value="codestral-latest",
),
StrInput(
name="mistral_api_base",
display_name="Mistral API Base",
advanced=True,
info=(
"The base URL of the Mistral API. Defaults to https://api.mistral.ai/v1. "
"You can change this to use other APIs like JinaChat, LocalAI and Prem."
),
),
SecretStrInput(
name="api_key",
display_name="Mistral API Key",
info="The Mistral API Key to use for the Mistral model.",
advanced=False,
),
FloatInput(name="temperature", display_name="Temperature", advanced=False, value=0.5),
IntInput(name="max_retries", display_name="Max Retries", advanced=True, value=5),
IntInput(name="timeout", display_name="Timeout", advanced=True, value=60),
IntInput(name="max_concurrent_requests", display_name="Max Concurrent Requests", advanced=True, value=3),
FloatInput(name="top_p", display_name="Top P", advanced=True, value=1),
IntInput(name="random_seed", display_name="Random Seed", value=1, advanced=True),
BoolInput(name="safe_mode", display_name="Safe Mode", advanced=True),
]
def build_model(self) -> LanguageModel: # type: ignore[type-var]
mistral_api_key = self.api_key
temperature = self.temperature
model_name = self.model_name
max_tokens = self.max_tokens
mistral_api_base = self.mistral_api_base or "https://api.mistral.ai/v1"
max_retries = self.max_retries
timeout = self.timeout
max_concurrent_requests = self.max_concurrent_requests
top_p = self.top_p
random_seed = self.random_seed
safe_mode = self.safe_mode
if mistral_api_key:
api_key = SecretStr(mistral_api_key)
else:
api_key = None
output = ChatMistralAI(
max_tokens=max_tokens or None,
model_name=model_name,
endpoint=mistral_api_base,
api_key=api_key,
temperature=temperature,
max_retries=max_retries,
timeout=timeout,
max_concurrent_requests=max_concurrent_requests,
top_p=top_p,
random_seed=random_seed,
safe_mode=safe_mode,
)
return output # type: ignore
NVIDIA
This component generates text using NVIDIA LLMs.
For more information, see the NVIDIA AI Foundation Models documentation.
Parameters
| Name | Type | Description |
|---|---|---|
| max_tokens | Integer | The maximum number of tokens to generate. Set to 0 for unlimited tokens (advanced). |
| model_name | String | The name of the NVIDIA model to use. Default: "mistralai/mixtral-8x7b-instruct-v0.1". |
| base_url | String | The base URL of the NVIDIA API. Default: "https://integrate.api.nvidia.com/v1". |
| nvidia_api_key | SecretString | The NVIDIA API Key for authentication. |
| temperature | Float | Controls randomness in the output. Default: 0.1. |
| seed | Integer | The seed controls the reproducibility of the job (advanced). Default: 1. |

| Name | Type | Description |
|---|---|---|
| model | LanguageModel | An instance of ChatNVIDIA configured with the specified parameters. |
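The base URL's refresh button works by building the model and reading its `available_models`, as in `update_build_config()` below. A sketch of the same idea; the API key is a placeholder:

```python
from langchain_nvidia_ai_endpoints import ChatNVIDIA

# Instantiate the model, then list the models the endpoint serves,
# as update_build_config() does to refresh the dropdown.
model = ChatNVIDIA(
    model="mistralai/mixtral-8x7b-instruct-v0.1",
    base_url="https://integrate.api.nvidia.com/v1",
    api_key="nvapi-...",  # placeholder
    temperature=0.1,
)
print([m.id for m in model.available_models])
print(model.invoke("Hello!").content)
```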
Component code
Nvidia.py
from typing import Any
from langflow.base.models.model import LCModelComponent
from langflow.field_typing import LanguageModel
from langflow.inputs import DropdownInput, FloatInput, IntInput, SecretStrInput, StrInput
from langflow.schema.dotdict import dotdict
class NVIDIAModelComponent(LCModelComponent):
display_name = "NVIDIA"
description = "Generates text using NVIDIA LLMs."
icon = "NVIDIA"
inputs = LCModelComponent._base_inputs + [
IntInput(
name="max_tokens",
display_name="Max Tokens",
advanced=True,
info="The maximum number of tokens to generate. Set to 0 for unlimited tokens.",
),
DropdownInput(
name="model_name",
display_name="Model Name",
advanced=False,
options=["mistralai/mixtral-8x7b-instruct-v0.1"],
value="mistralai/mixtral-8x7b-instruct-v0.1",
),
StrInput(
name="base_url",
display_name="NVIDIA Base URL",
value="https://integrate.api.nvidia.com/v1",
refresh_button=True,
info="The base URL of the NVIDIA API. Defaults to https://integrate.api.nvidia.com/v1.",
),
SecretStrInput(
name="nvidia_api_key",
display_name="NVIDIA API Key",
info="The NVIDIA API Key.",
advanced=False,
value="NVIDIA_API_KEY",
),
FloatInput(name="temperature", display_name="Temperature", value=0.1),
IntInput(
name="seed",
display_name="Seed",
info="The seed controls the reproducibility of the job.",
advanced=True,
value=1,
),
]
def update_build_config(self, build_config: dotdict, field_value: Any, field_name: str | None = None):
if field_name == "base_url" and field_value:
try:
build_model = self.build_model()
ids = [model.id for model in build_model.available_models] # type: ignore
build_config["model_name"]["options"] = ids
build_config["model_name"]["value"] = ids[0]
except Exception as e:
raise ValueError(f"Error getting model names: {e}")
return build_config
def build_model(self) -> LanguageModel: # type: ignore[type-var]
try:
from langchain_nvidia_ai_endpoints import ChatNVIDIA
except ImportError:
raise ImportError("Please install langchain-nvidia-ai-endpoints to use the NVIDIA model.")
nvidia_api_key = self.nvidia_api_key
temperature = self.temperature
model_name: str = self.model_name
max_tokens = self.max_tokens
seed = self.seed
output = ChatNVIDIA(
max_tokens=max_tokens or None,
model=model_name,
base_url=self.base_url,
api_key=nvidia_api_key, # type: ignore
temperature=temperature or 0.1,
seed=seed,
)
return output # type: ignore
Ollama
This component generates text using Ollama’s language models.
For more information, see the Ollama documentation.
Parameters
| Name | Display Name | Info |
|---|---|---|
| Base URL | Base URL | Endpoint of the Ollama API. |
| Model Name | Model Name | The model name to use. |
| Temperature | Temperature | Controls the creativity of model responses. |
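The model dropdown is filled from Ollama's `/api/tags` endpoint, as in the component's `get_model()` helper. A sketch of that request plus a chat call, assuming a local Ollama server with the `llama3.1` model pulled:

```python
import httpx
from langchain_community.chat_models import ChatOllama

base_url = "http://localhost:11434"

# List locally available models from /api/tags, like get_model() below.
tags = httpx.get(f"{base_url}/api/tags", timeout=5).json()
print([m["name"] for m in tags.get("models", [])])

model = ChatOllama(base_url=base_url, model="llama3.1", temperature=0.2)
print(model.invoke("Hello!").content)
```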
Component code
Ollama.py
from typing import Any
from urllib.parse import urljoin
import httpx
from langchain_community.chat_models import ChatOllama
from langflow.base.models.model import LCModelComponent
from langflow.field_typing import LanguageModel
from langflow.io import BoolInput, DictInput, DropdownInput, FloatInput, IntInput, StrInput
class ChatOllamaComponent(LCModelComponent):
display_name = "Ollama"
description = "Generate text using Ollama Local LLMs."
icon = "Ollama"
name = "OllamaModel"
def update_build_config(self, build_config: dict, field_value: Any, field_name: str | None = None):
if field_name == "mirostat":
if field_value == "Disabled":
build_config["mirostat_eta"]["advanced"] = True
build_config["mirostat_tau"]["advanced"] = True
build_config["mirostat_eta"]["value"] = None
build_config["mirostat_tau"]["value"] = None
else:
build_config["mirostat_eta"]["advanced"] = False
build_config["mirostat_tau"]["advanced"] = False
if field_value == "Mirostat 2.0":
build_config["mirostat_eta"]["value"] = 0.2
build_config["mirostat_tau"]["value"] = 10
else:
build_config["mirostat_eta"]["value"] = 0.1
build_config["mirostat_tau"]["value"] = 5
if field_name == "model_name":
base_url_dict = build_config.get("base_url", {})
base_url_load_from_db = base_url_dict.get("load_from_db", False)
base_url_value = base_url_dict.get("value")
if base_url_load_from_db:
base_url_value = self.variables(base_url_value)
elif not base_url_value:
base_url_value = "http://localhost:11434"
build_config["model_name"]["options"] = self.get_model(base_url_value)
if field_name == "keep_alive_flag":
if field_value == "Keep":
build_config["keep_alive"]["value"] = "-1"
build_config["keep_alive"]["advanced"] = True
elif field_value == "Immediately":
build_config["keep_alive"]["value"] = "0"
build_config["keep_alive"]["advanced"] = True
else:
build_config["keep_alive"]["advanced"] = False
return build_config
def get_model(self, base_url_value: str) -> list[str]:
try:
url = urljoin(base_url_value, "/api/tags")
with httpx.Client() as client:
response = client.get(url)
response.raise_for_status()
data = response.json()
model_names = [model["name"] for model in data.get("models", [])]
return model_names
except Exception as e:
raise ValueError("Could not retrieve models. Please, make sure Ollama is running.") from e
inputs = LCModelComponent._base_inputs + [
StrInput(
name="base_url",
display_name="Base URL",
info="Endpoint of the Ollama API. Defaults to 'http://localhost:11434' if not specified.",
value="http://localhost:11434",
),
DropdownInput(
name="model_name",
display_name="Model Name",
value="llama3.1",
info="Refer to https://ollama.com/library for more models.",
refresh_button=True,
),
FloatInput(
name="temperature",
display_name="Temperature",
value=0.2,
info="Controls the creativity of model responses.",
),
StrInput(
name="format",
display_name="Format",
info="Specify the format of the output (e.g., json).",
advanced=True,
),
DictInput(
name="metadata",
display_name="Metadata",
info="Metadata to add to the run trace.",
advanced=True,
),
DropdownInput(
name="mirostat",
display_name="Mirostat",
options=["Disabled", "Mirostat", "Mirostat 2.0"],
info="Enable/disable Mirostat sampling for controlling perplexity.",
value="Disabled",
advanced=True,
real_time_refresh=True,
),
FloatInput(
name="mirostat_eta",
display_name="Mirostat Eta",
info="Learning rate for Mirostat algorithm. (Default: 0.1)",
advanced=True,
),
FloatInput(
name="mirostat_tau",
display_name="Mirostat Tau",
info="Controls the balance between coherence and diversity of the output. (Default: 5.0)",
advanced=True,
),
IntInput(
name="num_ctx",
display_name="Context Window Size",
info="Size of the context window for generating tokens. (Default: 2048)",
advanced=True,
),
IntInput(
name="num_gpu",
display_name="Number of GPUs",
info="Number of GPUs to use for computation. (Default: 1 on macOS, 0 to disable)",
advanced=True,
),
IntInput(
name="num_thread",
display_name="Number of Threads",
info="Number of threads to use during computation. (Default: detected for optimal performance)",
advanced=True,
),
IntInput(
name="repeat_last_n",
display_name="Repeat Last N",
info="How far back the model looks to prevent repetition. (Default: 64, 0 = disabled, -1 = num_ctx)",
advanced=True,
),
FloatInput(
name="repeat_penalty",
display_name="Repeat Penalty",
info="Penalty for repetitions in generated text. (Default: 1.1)",
advanced=True,
),
FloatInput(
name="tfs_z",
display_name="TFS Z",
info="Tail free sampling value. (Default: 1)",
advanced=True,
),
IntInput(
name="timeout",
display_name="Timeout",
info="Timeout for the request stream.",
advanced=True,
),
IntInput(
name="top_k",
display_name="Top K",
info="Limits token selection to top K. (Default: 40)",
advanced=True,
),
FloatInput(
name="top_p",
display_name="Top P",
info="Works together with top-k. (Default: 0.9)",
advanced=True,
),
BoolInput(
name="verbose",
display_name="Verbose",
info="Whether to print out response text.",
),
StrInput(
name="tags",
display_name="Tags",
info="Comma-separated list of tags to add to the run trace.",
advanced=True,
),
StrInput(
name="stop_tokens",
display_name="Stop Tokens",
info="Comma-separated list of tokens to signal the model to stop generating text.",
advanced=True,
),
StrInput(
name="system",
display_name="System",
info="System to use for generating text.",
advanced=True,
),
StrInput(
name="template",
display_name="Template",
info="Template to use for generating text.",
advanced=True,
),
]
def build_model(self) -> LanguageModel: # type: ignore[type-var]
# Mapping mirostat settings to their corresponding values
mirostat_options = {"Mirostat": 1, "Mirostat 2.0": 2}
# Default to 0 for 'Disabled'
mirostat_value = mirostat_options.get(self.mirostat, 0) # type: ignore
# Set mirostat_eta and mirostat_tau to None if mirostat is disabled
if mirostat_value == 0:
mirostat_eta = None
mirostat_tau = None
else:
mirostat_eta = self.mirostat_eta
mirostat_tau = self.mirostat_tau
# Mapping system settings to their corresponding values
llm_params = {
"base_url": self.base_url,
"model": self.model_name,
"mirostat": mirostat_value,
"format": self.format,
"metadata": self.metadata,
"tags": self.tags.split(",") if self.tags else None,
"mirostat_eta": mirostat_eta,
"mirostat_tau": mirostat_tau,
"num_ctx": self.num_ctx or None,
"num_gpu": self.num_gpu or None,
"num_thread": self.num_thread or None,
"repeat_last_n": self.repeat_last_n or None,
"repeat_penalty": self.repeat_penalty or None,
"temperature": self.temperature or None,
"stop": self.stop_tokens.split(",") if self.stop_tokens else None,
"system": self.system,
"template": self.template,
"tfs_z": self.tfs_z or None,
"timeout": self.timeout or None,
"top_k": self.top_k or None,
"top_p": self.top_p or None,
"verbose": self.verbose,
}
# Remove parameters with None values
llm_params = {k: v for k, v in llm_params.items() if v is not None}
try:
output = ChatOllama(**llm_params) # type: ignore
except Exception as e:
raise ValueError("Could not initialize Ollama LLM.") from e
return output # type: ignore
OpenAI
The OpenAIModelComponent generates text using OpenAI’s language models. It builds and returns a ChatOpenAI model instance with the specified configurations.
Parameters
| Name | Display Name | Info |
|---|---|---|
| max_tokens | Max Tokens | Maximum number of tokens to generate |
| model_kwargs | Model Kwargs | Additional keyword arguments for the model |
| json_mode | JSON Mode | Enable JSON output mode |
| output_schema | Schema | Schema for the model's output |
| model_name | Model Name | Name of the OpenAI model to use |
| openai_api_base | OpenAI API Base | Base URL for the OpenAI API |
| api_key | OpenAI API Key | API key for authentication |
| temperature | Temperature | Controls randomness in output |
| seed | Seed | Seed for reproducibility |

| Name | Display Name | Info |
|---|---|---|
| output | Language Model | Configured ChatOpenAI model instance |
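When `output_schema` is set, the component calls `with_structured_output(..., method="json_mode")`; when only `json_mode` is enabled, it binds `response_format`, as sketched here (assuming `OPENAI_API_KEY` is set; the model name is illustrative):

```python
from langchain_openai import ChatOpenAI

model = ChatOpenAI(model="gpt-4o-mini", temperature=0.1, seed=1)

# JSON mode without a schema binds response_format, as in build_model();
# the prompt must still mention JSON.
json_model = model.bind(response_format={"type": "json_object"})
reply = json_model.invoke("Return a JSON object with keys 'a' and 'b'.")
print(reply.content)  # a JSON string
```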
Component code
OpenAI.py
import operator
from functools import reduce
from langflow.field_typing.range_spec import RangeSpec
from langchain_openai import ChatOpenAI
from pydantic.v1 import SecretStr
from langflow.base.models.model import LCModelComponent
from langflow.base.models.openai_constants import OPENAI_MODEL_NAMES
from langflow.field_typing import LanguageModel
from langflow.inputs import (
BoolInput,
DictInput,
DropdownInput,
FloatInput,
IntInput,
SecretStrInput,
StrInput,
)
class OpenAIModelComponent(LCModelComponent):
display_name = "OpenAI"
description = "Generates text using OpenAI LLMs."
icon = "OpenAI"
name = "OpenAIModel"
inputs = LCModelComponent._base_inputs + [
IntInput(
name="max_tokens",
display_name="Max Tokens",
advanced=True,
info="The maximum number of tokens to generate. Set to 0 for unlimited tokens.",
range_spec=RangeSpec(min=0, max=128000),
),
DictInput(name="model_kwargs", display_name="Model Kwargs", advanced=True),
BoolInput(
name="json_mode",
display_name="JSON Mode",
advanced=True,
info="If True, it will output JSON regardless of passing a schema.",
),
DictInput(
name="output_schema",
is_list=True,
display_name="Schema",
advanced=True,
info="The schema for the Output of the model. You must pass the word JSON in the prompt. If left blank, JSON mode will be disabled.",
),
DropdownInput(
name="model_name",
display_name="Model Name",
advanced=False,
options=OPENAI_MODEL_NAMES,
value=OPENAI_MODEL_NAMES[0],
),
StrInput(
name="openai_api_base",
display_name="OpenAI API Base",
advanced=True,
info="The base URL of the OpenAI API. Defaults to https://api.openai.com/v1. You can change this to use other APIs like JinaChat, LocalAI and Prem.",
),
SecretStrInput(
name="api_key",
display_name="OpenAI API Key",
info="The OpenAI API Key to use for the OpenAI model.",
advanced=False,
value="OPENAI_API_KEY",
),
FloatInput(name="temperature", display_name="Temperature", value=0.1),
IntInput(
name="seed",
display_name="Seed",
info="The seed controls the reproducibility of the job.",
advanced=True,
value=1,
),
]
def build_model(self) -> LanguageModel: # type: ignore[type-var]
# self.output_schema is a list of dictionaries
# let's convert it to a dictionary
output_schema_dict: dict[str, str] = reduce(operator.ior, self.output_schema or {}, {})
openai_api_key = self.api_key
temperature = self.temperature
model_name: str = self.model_name
max_tokens = self.max_tokens
model_kwargs = self.model_kwargs or {}
openai_api_base = self.openai_api_base or "https://api.openai.com/v1"
json_mode = bool(output_schema_dict) or self.json_mode
seed = self.seed
if openai_api_key:
api_key = SecretStr(openai_api_key)
else:
api_key = None
output = ChatOpenAI(
max_tokens=max_tokens or None,
model_kwargs=model_kwargs,
model=model_name,
base_url=openai_api_base,
api_key=api_key,
temperature=temperature if temperature is not None else 0.1,
seed=seed,
)
if json_mode:
if output_schema_dict:
output = output.with_structured_output(schema=output_schema_dict, method="json_mode") # type: ignore
else:
output = output.bind(response_format={"type": "json_object"}) # type: ignore
return output # type: ignore
def _get_exception_message(self, e: Exception):
"""
Get a message from an OpenAI exception.
Args:
exception (Exception): The exception to get the message from.
Returns:
str: The message from the exception.
"""
try:
from openai import BadRequestError
except ImportError:
return
if isinstance(e, BadRequestError):
message = e.body.get("message") # type: ignore
if message:
return message
return
Qianfan
This component generates text using Qianfan’s language models.
For more information, see the Qianfan documentation.
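A sketch of the equivalent `QianfanChatEndpoint` construction with the component's defaults; the access key (AK) and secret key (SK) are placeholders:

```python
from langchain_community.chat_models.baidu_qianfan_endpoint import QianfanChatEndpoint
from pydantic.v1 import SecretStr

# Same construction as the component; the AK/SK pair is a placeholder.
model = QianfanChatEndpoint(
    model="ERNIE-Bot-turbo",
    qianfan_ak=SecretStr("..."),
    qianfan_sk=SecretStr("..."),
    temperature=0.95,
)
print(model.invoke("你好!").content)
```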
Component code
BaiduQianfanChat.py
from langchain_community.chat_models.baidu_qianfan_endpoint import QianfanChatEndpoint
from pydantic.v1 import SecretStr
from langflow.base.models.model import LCModelComponent
from langflow.field_typing.constants import LanguageModel
from langflow.io import DropdownInput, FloatInput, MessageTextInput, SecretStrInput
class QianfanChatEndpointComponent(LCModelComponent):
display_name: str = "Qianfan"
description: str = "Generate text using Baidu Qianfan LLMs."
documentation: str = "https://python.langchain.com/docs/integrations/chat/baidu_qianfan_endpoint"
icon = "BaiduQianfan"
name = "BaiduQianfanChatModel"
inputs = LCModelComponent._base_inputs + [
DropdownInput(
name="model",
display_name="Model Name",
options=[
"ERNIE-Bot",
"ERNIE-Bot-turbo",
"BLOOMZ-7B",
"Llama-2-7b-chat",
"Llama-2-13b-chat",
"Llama-2-70b-chat",
"Qianfan-BLOOMZ-7B-compressed",
"Qianfan-Chinese-Llama-2-7B",
"ChatGLM2-6B-32K",
"AquilaChat-7B",
],
info="https://python.langchain.com/docs/integrations/chat/baidu_qianfan_endpoint",
value="ERNIE-Bot-turbo",
),
SecretStrInput(
name="qianfan_ak",
display_name="Qianfan Ak",
info="which you could get from https://cloud.baidu.com/product/wenxinworkshop",
),
SecretStrInput(
name="qianfan_sk",
display_name="Qianfan Sk",
info="which you could get from https://cloud.baidu.com/product/wenxinworkshop",
),
FloatInput(
name="top_p",
display_name="Top p",
info="Model params, only supported in ERNIE-Bot and ERNIE-Bot-turbo",
value=0.8,
advanced=True,
),
FloatInput(
name="temperature",
display_name="Temperature",
info="Model params, only supported in ERNIE-Bot and ERNIE-Bot-turbo",
value=0.95,
),
FloatInput(
name="penalty_score",
display_name="Penalty Score",
info="Model params, only supported in ERNIE-Bot and ERNIE-Bot-turbo",
value=1.0,
advanced=True,
),
MessageTextInput(
name="endpoint",
display_name="Endpoint",
info="Endpoint of the Qianfan LLM, required if custom model used.",
),
]
def build_model(self) -> LanguageModel: # type: ignore[type-var]
model = self.model
qianfan_ak = self.qianfan_ak
qianfan_sk = self.qianfan_sk
top_p = self.top_p
temperature = self.temperature
penalty_score = self.penalty_score
endpoint = self.endpoint
try:
output = QianfanChatEndpoint( # type: ignore
model=model,
qianfan_ak=SecretStr(qianfan_ak) if qianfan_ak else None,
qianfan_sk=SecretStr(qianfan_sk) if qianfan_sk else None,
top_p=top_p,
temperature=temperature,
penalty_score=penalty_score,
endpoint=endpoint,
)
except Exception as e:
raise ValueError("Could not connect to Baidu Qianfan API.") from e
return output # type: ignore
Perplexity
This component generates text using Perplexity’s language models.
For more information, see the Perplexity documentation.
Parameters
| Name | Type | Description |
|---|---|---|
| model_name | String | The name of the Perplexity model to use. Options include various Llama 3.1 models. |
| max_output_tokens | Integer | The maximum number of tokens to generate. |
| api_key | SecretString | The Perplexity API Key for authentication. |
| temperature | Float | Controls randomness in the output. Default: 0.75. |
| top_p | Float | The maximum cumulative probability of tokens to consider when sampling (advanced). |
| n | Integer | Number of chat completions to generate for each prompt (advanced). |
| top_k | Integer | Number of top tokens to consider for top-k sampling. Must be positive (advanced). |

| Name | Type | Description |
|---|---|---|
| model | LanguageModel | An instance of ChatPerplexity configured with the specified parameters. |
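A sketch of the equivalent `ChatPerplexity` call with the component's defaults; the environment variable name is an assumption:

```python
import os

from langchain_community.chat_models import ChatPerplexity

# Mirrors the component's defaults; the env var name is an assumption.
model = ChatPerplexity(
    model="llama-3.1-sonar-small-128k-online",
    pplx_api_key=os.environ["PPLX_API_KEY"],
    temperature=0.75,
)
print(model.invoke("Summarize today's top tech story.").content)
```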
Component code
Perplexity.py
from langchain_community.chat_models import ChatPerplexity
from pydantic.v1 import SecretStr
from langflow.base.models.model import LCModelComponent
from langflow.field_typing import LanguageModel
from langflow.io import FloatInput, SecretStrInput, DropdownInput, IntInput
class PerplexityComponent(LCModelComponent):
display_name = "Perplexity"
description = "Generate text using Perplexity LLMs."
documentation = "https://python.langchain.com/v0.2/docs/integrations/chat/perplexity/"
icon = "Perplexity"
name = "PerplexityModel"
inputs = LCModelComponent._base_inputs + [
DropdownInput(
name="model_name",
display_name="Model Name",
advanced=False,
options=[
"llama-3.1-sonar-small-128k-online",
"llama-3.1-sonar-large-128k-online",
"llama-3.1-sonar-huge-128k-online",
"llama-3.1-sonar-small-128k-chat",
"llama-3.1-sonar-large-128k-chat",
"llama-3.1-8b-instruct",
"llama-3.1-70b-instruct",
],
value="llama-3.1-sonar-small-128k-online",
),
IntInput(
name="max_output_tokens",
display_name="Max Output Tokens",
info="The maximum number of tokens to generate.",
),
SecretStrInput(
name="api_key",
display_name="Perplexity API Key",
info="The Perplexity API Key to use for the Perplexity model.",
advanced=False,
),
FloatInput(name="temperature", display_name="Temperature", value=0.75),
FloatInput(
name="top_p",
display_name="Top P",
info="The maximum cumulative probability of tokens to consider when sampling.",
advanced=True,
),
IntInput(
name="n",
display_name="N",
info="Number of chat completions to generate for each prompt. Note that the API may not return the full n completions if duplicates are generated.",
advanced=True,
),
IntInput(
name="top_k",
display_name="Top K",
info="Decode using top-k sampling: consider the set of top_k most probable tokens. Must be positive.",
advanced=True,
),
]
def build_model(self) -> LanguageModel: # type: ignore[type-var]
api_key = SecretStr(self.api_key).get_secret_value()
temperature = self.temperature
model = self.model_name
max_output_tokens = self.max_output_tokens
top_k = self.top_k
top_p = self.top_p
n = self.n
output = ChatPerplexity(
model=model,
temperature=temperature or 0.75,
pplx_api_key=api_key,
top_k=top_k or None,
top_p=top_p or None,
n=n or 1,
max_output_tokens=max_output_tokens,
)
return output # type: ignore
VertexAI
This component generates text using Vertex AI LLMs.
For more information, see the Google Vertex AI documentation.
Parameters
| Name | Type | Description |
|---|---|---|
| credentials | File | JSON credentials file. Leave empty to fall back to environment variables. File type: JSON. |
| model_name | String | The name of the Vertex AI model to use. Default: "gemini-1.5-pro". |
| project | String | The project ID (advanced). |
| location | String | The location for the Vertex AI API. Default: "us-central1" (advanced). |
| max_output_tokens | Integer | The maximum number of tokens to generate (advanced). |
| max_retries | Integer | Maximum number of retries for API calls. Default: 1 (advanced). |
| temperature | Float | Controls randomness in the output. Default: 0.0. |
| top_k | Integer | The number of highest-probability vocabulary tokens to keep for top-k filtering (advanced). |
| top_p | Float | The cumulative probability of the highest-probability vocabulary tokens to keep for nucleus sampling. Default: 0.95 (advanced). |
| verbose | Boolean | Whether to print verbose output. Default: False (advanced). |

| Name | Type | Description |
|---|---|---|
| model | LanguageModel | An instance of ChatVertexAI configured with the specified parameters. |
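A sketch of the equivalent `ChatVertexAI` call using a service-account file; the file path is a placeholder (the component additionally calls `aiplatform.init()` with the same credentials):

```python
from google.oauth2 import service_account
from langchain_google_vertexai import ChatVertexAI

# Load service-account credentials and pass them straight to the model;
# for a direct call this is usually enough.
creds = service_account.Credentials.from_service_account_file("sa.json")
model = ChatVertexAI(
    model_name="gemini-1.5-pro",
    project=creds.project_id,
    location="us-central1",
    credentials=creds,
    temperature=0.0,
)
print(model.invoke("Hello!").content)
```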
Component code
VertexAi.py
from typing import cast
from langflow.base.models.model import LCModelComponent
from langflow.field_typing import LanguageModel
from langflow.inputs import MessageTextInput
from langflow.io import BoolInput, FileInput, FloatInput, IntInput, StrInput
class ChatVertexAIComponent(LCModelComponent):
display_name = "Vertex AI"
description = "Generate text using Vertex AI LLMs."
icon = "VertexAI"
name = "VertexAiModel"
inputs = LCModelComponent._base_inputs + [
FileInput(
name="credentials",
display_name="Credentials",
info="JSON credentials file. Leave empty to fallback to environment variables",
file_types=["json"],
),
MessageTextInput(name="model_name", display_name="Model Name", value="gemini-1.5-pro"),
StrInput(name="project", display_name="Project", info="The project ID.", advanced=True),
StrInput(name="location", display_name="Location", value="us-central1", advanced=True),
IntInput(name="max_output_tokens", display_name="Max Output Tokens", advanced=True),
IntInput(name="max_retries", display_name="Max Retries", value=1, advanced=True),
FloatInput(name="temperature", value=0.0, display_name="Temperature"),
IntInput(name="top_k", display_name="Top K", advanced=True),
FloatInput(name="top_p", display_name="Top P", value=0.95, advanced=True),
BoolInput(name="verbose", display_name="Verbose", value=False, advanced=True),
]
def build_model(self) -> LanguageModel:
try:
from langchain_google_vertexai import ChatVertexAI
except ImportError:
            raise ImportError(
                "Please install the langchain-google-vertexai package to use the Vertex AI model component."
            )
location = self.location or None
if self.credentials:
from google.cloud import aiplatform
from google.oauth2 import service_account
credentials = service_account.Credentials.from_service_account_file(self.credentials)
project = self.project or credentials.project_id
            # ChatVertexAI sometimes skips manual credentials initialization
aiplatform.init(
project=project,
location=location,
credentials=credentials,
)
else:
project = self.project or None
credentials = None
return cast(
LanguageModel,
ChatVertexAI(
credentials=credentials,
location=location,
project=project,
max_output_tokens=self.max_output_tokens or None,
max_retries=self.max_retries,
model_name=self.model_name,
temperature=self.temperature,
top_k=self.top_k or None,
top_p=self.top_p,
verbose=self.verbose,
),
)